diff --git a/.generator/schemas/v2/openapi.yaml b/.generator/schemas/v2/openapi.yaml index d235bdcd3d..596a5ac893 100644 --- a/.generator/schemas/v2/openapi.yaml +++ b/.generator/schemas/v2/openapi.yaml @@ -3851,6 +3851,276 @@ components: example: "90646597-5fdb-4a17-a240-647003f8c028" format: uuid type: string + ApmDependencyStatName: + description: The APM dependency statistic to query. + enum: + - avg_duration + - avg_root_duration + - avg_spans_per_trace + - error_rate + - pct_exec_time + - pct_of_traces + - total_traces_count + example: avg_duration + type: string + x-enum-varnames: + - AVG_DURATION + - AVG_ROOT_DURATION + - AVG_SPANS_PER_TRACE + - ERROR_RATE + - PCT_EXEC_TIME + - PCT_OF_TRACES + - TOTAL_TRACES_COUNT + ApmDependencyStatsDataSource: + default: apm_dependency_stats + description: A data source for APM dependency statistics queries. + enum: + - apm_dependency_stats + example: apm_dependency_stats + type: string + x-enum-varnames: + - APM_DEPENDENCY_STATS + ApmDependencyStatsQuery: + description: An individual APM dependency stats query. + properties: + data_source: + $ref: "#/components/schemas/ApmDependencyStatsDataSource" + env: + description: The environment to query. + example: prod + type: string + is_upstream: + description: Determines whether stats for upstream or downstream dependencies should be queried. + example: true + type: boolean + name: + description: The variable name for use in formulas. + example: query1 + type: string + operation_name: + description: The APM operation name. + example: web.request + type: string + primary_tag_name: + description: The name of the second primary tag used within APM; required when `primary_tag_value` is specified. See https://docs.datadoghq.com/tracing/guide/setting_primary_tags_to_scope/#add-a-second-primary-tag-in-datadog. + example: datacenter + type: string + primary_tag_value: + description: Filter APM data by the second primary tag. `primary_tag_name` must also be specified. 
+ example: us-east-1 + type: string + resource_name: + description: The resource name to filter by. + example: "" + type: string + service: + description: The service name to filter by. + example: web-store + type: string + stat: + $ref: "#/components/schemas/ApmDependencyStatName" + required: + - data_source + - name + - env + - operation_name + - resource_name + - service + - stat + type: object + ApmMetricsDataSource: + default: apm_metrics + description: A data source for APM metrics queries. + enum: + - apm_metrics + example: apm_metrics + type: string + x-enum-varnames: + - APM_METRICS + ApmMetricsQuery: + description: An individual APM metrics query. + properties: + data_source: + $ref: "#/components/schemas/ApmMetricsDataSource" + group_by: + description: Optional fields to group the query results by. + items: + description: A field to group results by. + example: service + type: string + type: array + name: + description: The variable name for use in formulas. + example: query1 + type: string + operation_mode: + description: Optional operation mode to aggregate across operation names. + example: "primary" + type: string + operation_name: + description: Name of operation on service. If not provided, the primary operation name is used. + example: web.request + type: string + peer_tags: + description: Tags to query for a specific downstream entity (peer.service, peer.db_instance, peer.s3, peer.s3.bucket, etc.). + items: + description: A peer tag value. + example: "peer.service:my-service" + type: string + type: array + query_filter: + description: Additional filters for the query using metrics query syntax (e.g., env, primary_tag). + example: "env:prod" + type: string + resource_hash: + description: The resource hash for exact matching. + example: "abc123" + type: string + resource_name: + description: The full name of a specific resource to filter by. + example: "GET /api/v1/users" + type: string + service: + description: The service name to filter by. 
+ example: web-store + type: string + span_kind: + description: Describes the relationship between the span, its parents, and its children in a trace. Known values include consumer, server, client, producer, internal. + example: server + type: string + stat: + $ref: "#/components/schemas/ApmMetricsStat" + required: + - data_source + - name + - stat + type: object + ApmMetricsStat: + description: The APM metric statistic to query. + enum: + - error_rate + - errors + - errors_per_second + - hits + - hits_per_second + - apdex + - latency_avg + - latency_max + - latency_p50 + - latency_p75 + - latency_p90 + - latency_p95 + - latency_p99 + - latency_p999 + - latency_distribution + - total_time + example: latency_p99 + type: string + x-enum-varnames: + - ERROR_RATE + - ERRORS + - ERRORS_PER_SECOND + - HITS + - HITS_PER_SECOND + - APDEX + - LATENCY_AVG + - LATENCY_MAX + - LATENCY_P50 + - LATENCY_P75 + - LATENCY_P90 + - LATENCY_P95 + - LATENCY_P99 + - LATENCY_P999 + - LATENCY_DISTRIBUTION + - TOTAL_TIME + ApmResourceStatName: + description: The APM resource statistic to query. + enum: + - error_rate + - errors + - hits + - latency_avg + - latency_max + - latency_p50 + - latency_p75 + - latency_p90 + - latency_p95 + - latency_p99 + - latency_distribution + - total_time + example: latency_p95 + type: string + x-enum-varnames: + - ERROR_RATE + - ERRORS + - HITS + - LATENCY_AVG + - LATENCY_MAX + - LATENCY_P50 + - LATENCY_P75 + - LATENCY_P90 + - LATENCY_P95 + - LATENCY_P99 + - LATENCY_DISTRIBUTION + - TOTAL_TIME + ApmResourceStatsDataSource: + default: apm_resource_stats + description: A data source for APM resource statistics queries. + enum: + - apm_resource_stats + example: apm_resource_stats + type: string + x-enum-varnames: + - APM_RESOURCE_STATS + ApmResourceStatsQuery: + description: An individual APM resource stats query. + properties: + data_source: + $ref: "#/components/schemas/ApmResourceStatsDataSource" + env: + description: The environment to query. 
+ example: prod + type: string + group_by: + description: Tag keys to group results by. + items: + description: A tag key to group by. + example: resource_name + type: string + type: array + name: + description: The variable name for use in formulas. + example: query1 + type: string + operation_name: + description: The APM operation name. + example: web.request + type: string + primary_tag_name: + description: Name of the second primary tag used within APM. Required when `primary_tag_value` is specified. See https://docs.datadoghq.com/tracing/guide/setting_primary_tags_to_scope/#add-a-second-primary-tag-in-datadog + example: datacenter + type: string + primary_tag_value: + description: Value of the second primary tag by which to filter APM data. `primary_tag_name` must also be specified. + example: us-east-1 + type: string + resource_name: + description: The resource name to filter by. + example: "Admin::ProductsController#create" + type: string + service: + description: The service name to filter by. + example: web-store + type: string + stat: + $ref: "#/components/schemas/ApmResourceStatName" + required: + - data_source + - name + - env + - service + - stat + type: object ApmRetentionFilterType: default: apm_retention_filter description: The type of the resource. @@ -13939,6 +14209,15 @@ components: type: string type: array type: object + ContainerDataSource: + default: container + description: A data source for container-level infrastructure metrics. + enum: + - container + example: container + type: string + x-enum-varnames: + - CONTAINER ContainerGroup: description: Container group object. properties: @@ -14350,6 +14629,86 @@ components: type: string x-enum-varnames: - CURSOR_LIMIT + ContainerScalarQuery: + description: An individual scalar container query. + properties: + aggregator: + description: The temporal reduction function to apply. 
+ example: avg + type: string + data_source: + $ref: "#/components/schemas/ContainerDataSource" + is_normalized_cpu: + description: Whether CPU metrics should be normalized by core count. + type: boolean + limit: + description: Maximum number of timeseries to return. + format: int64 + type: integer + metric: + description: The container metric to query. + example: process.stat.container.cpu.total_pct + type: string + name: + description: The variable name for use in formulas. + example: query1 + type: string + sort: + description: Sort order for the results. + type: string + tag_filters: + description: Tag filters to narrow down containers. + items: + description: A tag filter value. + example: "env:prod" + type: string + type: array + text_filter: + description: A full-text search filter to match container names. + type: string + required: + - data_source + - name + - metric + type: object + ContainerTimeseriesQuery: + description: An individual timeseries container query. + properties: + data_source: + $ref: "#/components/schemas/ContainerDataSource" + is_normalized_cpu: + description: Whether CPU metrics should be normalized by core count. + type: boolean + limit: + description: Maximum number of timeseries to return. + format: int64 + type: integer + metric: + description: The container metric to query. + example: process.stat.container.cpu.total_pct + type: string + name: + description: The variable name for use in formulas. + example: query1 + type: string + sort: + description: Sort order for the results. + type: string + tag_filters: + description: Tag filters to narrow down containers. + items: + description: A tag filter value. + example: "env:prod" + type: string + type: array + text_filter: + description: A full-text search filter to match container names. + type: string + required: + - data_source + - name + - metric + type: object ContainerType: default: container description: Type of container. 
@@ -24959,13 +25318,35 @@ components: description: A data source that is powered by the Events Platform. enum: - logs + - spans + - network - rum + - security_signals + - profiles + - audit + - events + - ci_tests + - ci_pipelines + - incident_analytics + - product_analytics + - on_call_events - dora example: logs type: string x-enum-varnames: - LOGS + - SPANS + - NETWORK - RUM + - SECURITY_SIGNALS + - PROFILES + - AUDIT + - EVENTS + - CI_TESTS + - CI_PIPELINES + - INCIDENT_ANALYTICS + - PRODUCT_ANALYTICS + - ON_CALL_EVENTS - DORA EventsGroupBy: description: A dimension on which to split a query's results. @@ -50256,6 +50637,57 @@ components: required: - type type: object + ProcessDataSource: + default: process + description: A data source for process-level infrastructure metrics. + enum: + - process + example: process + type: string + x-enum-varnames: + - PROCESS + ProcessScalarQuery: + description: An individual scalar process query. + properties: + aggregator: + description: The temporal reduction function to apply. + example: avg + type: string + data_source: + $ref: "#/components/schemas/ProcessDataSource" + is_normalized_cpu: + description: Whether CPU metrics should be normalized by core count. + type: boolean + limit: + description: Maximum number of timeseries to return. + format: int64 + type: integer + metric: + description: The process metric to query. + example: process.stat.cpu.total_pct + type: string + name: + description: The variable name for use in formulas. + example: query1 + type: string + sort: + description: Sort order for the results. + type: string + tag_filters: + description: Tag filters to narrow down processes. + items: + description: A tag filter value. + example: "env:prod" + type: string + type: array + text_filter: + description: A full-text search filter to match process names or commands. + type: string + required: + - data_source + - name + - metric + type: object ProcessSummariesMeta: description: Response metadata object. 
properties: @@ -50342,6 +50774,44 @@ components: type: string x-enum-varnames: - PROCESS + ProcessTimeseriesQuery: + description: An individual timeseries process query. + properties: + data_source: + $ref: "#/components/schemas/ProcessDataSource" + is_normalized_cpu: + description: Whether CPU metrics should be normalized by core count. + type: boolean + limit: + description: Maximum number of timeseries to return. + format: int64 + type: integer + metric: + description: The process metric to query. + example: process.stat.cpu.total_pct + type: string + name: + description: The variable name for use in formulas. + example: query1 + type: string + sort: + description: Sort order for the results. + type: string + tag_filters: + description: Tag filters to narrow down processes. + items: + description: A tag filter value. + example: "env:prod" + type: string + type: array + text_filter: + description: A full-text search filter to match process names or commands. + type: string + required: + - data_source + - name + - metric + type: object ProductAnalyticsAnalyticsQuery: description: The analytics query definition containing a base query, compute rule, and optional grouping. properties: @@ -56545,6 +57015,12 @@ components: oneOf: - $ref: "#/components/schemas/MetricsScalarQuery" - $ref: "#/components/schemas/EventsScalarQuery" + - $ref: "#/components/schemas/ApmResourceStatsQuery" + - $ref: "#/components/schemas/ApmMetricsQuery" + - $ref: "#/components/schemas/ApmDependencyStatsQuery" + - $ref: "#/components/schemas/SloQuery" + - $ref: "#/components/schemas/ProcessScalarQuery" + - $ref: "#/components/schemas/ContainerScalarQuery" ScalarResponse: description: A message containing the response to a scalar query. properties: @@ -63919,6 +64395,51 @@ components: required: - slackTrigger type: object + SloDataSource: + default: slo + description: A data source for SLO queries. 
+ enum: + - slo + example: slo + type: string + x-enum-varnames: + - SLO + SloQuery: + description: An individual SLO query. + example: + additional_query_filters: "*" + data_source: "slo" + group_mode: "overall" + measure: "good_events" + name: "my_slo" + slo_id: "12345678910" + slo_query_type: "metric" + properties: + additional_query_filters: + description: Additional filters applied to the SLO query. + example: "host:host_a,env:prod" + type: string + data_source: + $ref: "#/components/schemas/SloDataSource" + group_mode: + $ref: "#/components/schemas/SlosGroupMode" + measure: + $ref: "#/components/schemas/SlosMeasure" + name: + description: The variable name for use in formulas. + example: query1 + type: string + slo_id: + description: The unique identifier of the SLO to query. + example: "a]b123c45de6f78g90h" + type: string + slo_query_type: + $ref: "#/components/schemas/SlosQueryType" + required: + - data_source + - slo_id + - measure + type: object SloReportCreateRequest: description: The SLO report request body. properties: @@ -64026,6 +64547,54 @@ components: type: string x-enum-varnames: - SLO_STATUS + SlosGroupMode: + description: How SLO results are grouped in the response. + enum: + - overall + - components + example: overall + type: string + x-enum-varnames: + - OVERALL + - COMPONENTS + SlosMeasure: + description: The SLO measurement to retrieve. + enum: + - good_events + - bad_events + - slo_status + - error_budget_remaining + - error_budget_remaining_history + - error_budget_burndown + - burn_rate + - slo_status_history + - good_minutes + - bad_minutes + example: slo_status + type: string + x-enum-varnames: + - GOOD_EVENTS + - BAD_EVENTS + - SLO_STATUS + - ERROR_BUDGET_REMAINING + - ERROR_BUDGET_REMAINING_HISTORY + - ERROR_BUDGET_BURNDOWN + - BURN_RATE + - SLO_STATUS_HISTORY + - GOOD_MINUTES + - BAD_MINUTES + SlosQueryType: + description: The type of SLO definition being queried. 
+ enum: + - metric + - time_slice + - monitor + example: metric + type: string + x-enum-varnames: + - METRIC + - TIME_SLICE + - MONITOR Snapshot: description: A single heatmap snapshot resource returned by create or update operations. properties: @@ -70806,6 +71375,12 @@ components: oneOf: - $ref: "#/components/schemas/MetricsTimeseriesQuery" - $ref: "#/components/schemas/EventsTimeseriesQuery" + - $ref: "#/components/schemas/ApmResourceStatsQuery" + - $ref: "#/components/schemas/ApmMetricsQuery" + - $ref: "#/components/schemas/ApmDependencyStatsQuery" + - $ref: "#/components/schemas/SloQuery" + - $ref: "#/components/schemas/ProcessTimeseriesQuery" + - $ref: "#/components/schemas/ContainerTimeseriesQuery" TimeseriesResponse: description: A message containing the response to a timeseries query. properties: diff --git a/docs/datadog_api_client.v2.model.rst b/docs/datadog_api_client.v2.model.rst index e714684121..64b015e38c 100644 --- a/docs/datadog_api_client.v2.model.rst +++ b/docs/datadog_api_client.v2.model.rst @@ -606,6 +606,69 @@ datadog\_api\_client.v2.model.api\_trigger\_wrapper module :members: :show-inheritance: +datadog\_api\_client.v2.model.apm\_dependency\_stat\_name module +---------------------------------------------------------------- + +.. automodule:: datadog_api_client.v2.model.apm_dependency_stat_name + :members: + :show-inheritance: + +datadog\_api\_client.v2.model.apm\_dependency\_stats\_data\_source module +------------------------------------------------------------------------- + +.. automodule:: datadog_api_client.v2.model.apm_dependency_stats_data_source + :members: + :show-inheritance: + +datadog\_api\_client.v2.model.apm\_dependency\_stats\_query module +------------------------------------------------------------------ + +.. 
automodule:: datadog_api_client.v2.model.apm_dependency_stats_query + :members: + :show-inheritance: + +datadog\_api\_client.v2.model.apm\_metrics\_data\_source module +--------------------------------------------------------------- + +.. automodule:: datadog_api_client.v2.model.apm_metrics_data_source + :members: + :show-inheritance: + +datadog\_api\_client.v2.model.apm\_metrics\_query module +-------------------------------------------------------- + +.. automodule:: datadog_api_client.v2.model.apm_metrics_query + :members: + :show-inheritance: + +datadog\_api\_client.v2.model.apm\_metrics\_stat module +------------------------------------------------------- + +.. automodule:: datadog_api_client.v2.model.apm_metrics_stat + :members: + :show-inheritance: + +datadog\_api\_client.v2.model.apm\_resource\_stat\_name module +-------------------------------------------------------------- + +.. automodule:: datadog_api_client.v2.model.apm_resource_stat_name + :members: + :show-inheritance: + +datadog\_api\_client.v2.model.apm\_resource\_stats\_data\_source module +----------------------------------------------------------------------- + +.. automodule:: datadog_api_client.v2.model.apm_resource_stats_data_source + :members: + :show-inheritance: + +datadog\_api\_client.v2.model.apm\_resource\_stats\_query module +---------------------------------------------------------------- + +.. automodule:: datadog_api_client.v2.model.apm_resource_stats_query + :members: + :show-inheritance: + datadog\_api\_client.v2.model.apm\_retention\_filter\_type module ----------------------------------------------------------------- @@ -5667,6 +5730,13 @@ datadog\_api\_client.v2.model.container\_attributes module :members: :show-inheritance: +datadog\_api\_client.v2.model.container\_data\_source module +------------------------------------------------------------ + +.. 
automodule:: datadog_api_client.v2.model.container_data_source + :members: + :show-inheritance: + datadog\_api\_client.v2.model.container\_group module ----------------------------------------------------- @@ -5856,6 +5926,20 @@ datadog\_api\_client.v2.model.container\_meta\_page\_type module :members: :show-inheritance: +datadog\_api\_client.v2.model.container\_scalar\_query module +------------------------------------------------------------- + +.. automodule:: datadog_api_client.v2.model.container_scalar_query + :members: + :show-inheritance: + +datadog\_api\_client.v2.model.container\_timeseries\_query module +----------------------------------------------------------------- + +.. automodule:: datadog_api_client.v2.model.container_timeseries_query + :members: + :show-inheritance: + datadog\_api\_client.v2.model.container\_type module ---------------------------------------------------- @@ -22404,6 +22488,20 @@ datadog\_api\_client.v2.model.preview\_entity\_response\_data module :members: :show-inheritance: +datadog\_api\_client.v2.model.process\_data\_source module +---------------------------------------------------------- + +.. automodule:: datadog_api_client.v2.model.process_data_source + :members: + :show-inheritance: + +datadog\_api\_client.v2.model.process\_scalar\_query module +----------------------------------------------------------- + +.. automodule:: datadog_api_client.v2.model.process_scalar_query + :members: + :show-inheritance: + datadog\_api\_client.v2.model.process\_summaries\_meta module ------------------------------------------------------------- @@ -22446,6 +22544,13 @@ datadog\_api\_client.v2.model.process\_summary\_type module :members: :show-inheritance: +datadog\_api\_client.v2.model.process\_timeseries\_query module +--------------------------------------------------------------- + +.. 
automodule:: datadog_api_client.v2.model.process_timeseries_query + :members: + :show-inheritance: + datadog\_api\_client.v2.model.product\_analytics\_analytics\_query module ------------------------------------------------------------------------- @@ -28620,6 +28725,20 @@ datadog\_api\_client.v2.model.slack\_trigger\_wrapper module :members: :show-inheritance: +datadog\_api\_client.v2.model.slo\_data\_source module +------------------------------------------------------ + +.. automodule:: datadog_api_client.v2.model.slo_data_source + :members: + :show-inheritance: + +datadog\_api\_client.v2.model.slo\_query module +----------------------------------------------- + +.. automodule:: datadog_api_client.v2.model.slo_query + :members: + :show-inheritance: + datadog\_api\_client.v2.model.slo\_report\_create\_request module ----------------------------------------------------------------- @@ -28718,6 +28837,27 @@ datadog\_api\_client.v2.model.slo\_status\_type module :members: :show-inheritance: +datadog\_api\_client.v2.model.slos\_group\_mode module +------------------------------------------------------ + +.. automodule:: datadog_api_client.v2.model.slos_group_mode + :members: + :show-inheritance: + +datadog\_api\_client.v2.model.slos\_measure module +-------------------------------------------------- + +.. automodule:: datadog_api_client.v2.model.slos_measure + :members: + :show-inheritance: + +datadog\_api\_client.v2.model.slos\_query\_type module +------------------------------------------------------ + +.. 
automodule:: datadog_api_client.v2.model.slos_query_type + :members: + :show-inheritance: + datadog\_api\_client.v2.model.snapshot module --------------------------------------------- diff --git a/examples/v2/metrics/QueryScalarData_1479548882.py b/examples/v2/metrics/QueryScalarData_1479548882.py new file mode 100644 index 0000000000..de7bb9fb67 --- /dev/null +++ b/examples/v2/metrics/QueryScalarData_1479548882.py @@ -0,0 +1,60 @@ +""" +Scalar cross product query with apm_resource_stats data source returns "OK" response +""" + +from datadog_api_client import ApiClient, Configuration +from datadog_api_client.v2.api.metrics_api import MetricsApi +from datadog_api_client.v2.model.apm_resource_stat_name import ApmResourceStatName +from datadog_api_client.v2.model.apm_resource_stats_data_source import ApmResourceStatsDataSource +from datadog_api_client.v2.model.apm_resource_stats_query import ApmResourceStatsQuery +from datadog_api_client.v2.model.formula_limit import FormulaLimit +from datadog_api_client.v2.model.query_formula import QueryFormula +from datadog_api_client.v2.model.query_sort_order import QuerySortOrder +from datadog_api_client.v2.model.scalar_formula_query_request import ScalarFormulaQueryRequest +from datadog_api_client.v2.model.scalar_formula_request import ScalarFormulaRequest +from datadog_api_client.v2.model.scalar_formula_request_attributes import ScalarFormulaRequestAttributes +from datadog_api_client.v2.model.scalar_formula_request_queries import ScalarFormulaRequestQueries +from datadog_api_client.v2.model.scalar_formula_request_type import ScalarFormulaRequestType + +body = ScalarFormulaQueryRequest( + data=ScalarFormulaRequest( + attributes=ScalarFormulaRequestAttributes( + formulas=[ + QueryFormula( + formula="a", + limit=FormulaLimit( + count=10, + order=QuerySortOrder.DESC, + ), + ), + ], + _from=1636625471000, + queries=ScalarFormulaRequestQueries( + [ + ApmResourceStatsQuery( + data_source=ApmResourceStatsDataSource.APM_RESOURCE_STATS, 
+ name="a", + env="staging", + service="azure-bill-import", + stat=ApmResourceStatName.HITS, + operation_name="cassandra.query", + group_by=[ + "resource_name", + ], + primary_tag_name="datacenter", + primary_tag_value="*", + ), + ] + ), + to=1636629071000, + ), + type=ScalarFormulaRequestType.SCALAR_REQUEST, + ), +) + +configuration = Configuration() +with ApiClient(configuration) as api_client: + api_instance = MetricsApi(api_client) + response = api_instance.query_scalar_data(body=body) + + print(response) diff --git a/examples/v2/metrics/QueryScalarData_1904811219.py b/examples/v2/metrics/QueryScalarData_1904811219.py new file mode 100644 index 0000000000..bc43c598d8 --- /dev/null +++ b/examples/v2/metrics/QueryScalarData_1904811219.py @@ -0,0 +1,57 @@ +""" +Scalar cross product query with apm_metrics data source returns "OK" response +""" + +from datadog_api_client import ApiClient, Configuration +from datadog_api_client.v2.api.metrics_api import MetricsApi +from datadog_api_client.v2.model.apm_metrics_data_source import ApmMetricsDataSource +from datadog_api_client.v2.model.apm_metrics_query import ApmMetricsQuery +from datadog_api_client.v2.model.apm_metrics_stat import ApmMetricsStat +from datadog_api_client.v2.model.formula_limit import FormulaLimit +from datadog_api_client.v2.model.query_formula import QueryFormula +from datadog_api_client.v2.model.query_sort_order import QuerySortOrder +from datadog_api_client.v2.model.scalar_formula_query_request import ScalarFormulaQueryRequest +from datadog_api_client.v2.model.scalar_formula_request import ScalarFormulaRequest +from datadog_api_client.v2.model.scalar_formula_request_attributes import ScalarFormulaRequestAttributes +from datadog_api_client.v2.model.scalar_formula_request_queries import ScalarFormulaRequestQueries +from datadog_api_client.v2.model.scalar_formula_request_type import ScalarFormulaRequestType + +body = ScalarFormulaQueryRequest( + data=ScalarFormulaRequest( + 
attributes=ScalarFormulaRequestAttributes( + formulas=[ + QueryFormula( + formula="a", + limit=FormulaLimit( + count=10, + order=QuerySortOrder.DESC, + ), + ), + ], + _from=1636625471000, + queries=ScalarFormulaRequestQueries( + [ + ApmMetricsQuery( + data_source=ApmMetricsDataSource.APM_METRICS, + name="a", + stat=ApmMetricsStat.HITS, + service="web-store", + query_filter="env:prod", + group_by=[ + "resource_name", + ], + ), + ] + ), + to=1636629071000, + ), + type=ScalarFormulaRequestType.SCALAR_REQUEST, + ), +) + +configuration = Configuration() +with ApiClient(configuration) as api_client: + api_instance = MetricsApi(api_client) + response = api_instance.query_scalar_data(body=body) + + print(response) diff --git a/examples/v2/metrics/QueryScalarData_2298288525.py b/examples/v2/metrics/QueryScalarData_2298288525.py new file mode 100644 index 0000000000..97339cce71 --- /dev/null +++ b/examples/v2/metrics/QueryScalarData_2298288525.py @@ -0,0 +1,58 @@ +""" +Scalar cross product query with slo data source returns "OK" response +""" + +from datadog_api_client import ApiClient, Configuration +from datadog_api_client.v2.api.metrics_api import MetricsApi +from datadog_api_client.v2.model.formula_limit import FormulaLimit +from datadog_api_client.v2.model.query_formula import QueryFormula +from datadog_api_client.v2.model.query_sort_order import QuerySortOrder +from datadog_api_client.v2.model.scalar_formula_query_request import ScalarFormulaQueryRequest +from datadog_api_client.v2.model.scalar_formula_request import ScalarFormulaRequest +from datadog_api_client.v2.model.scalar_formula_request_attributes import ScalarFormulaRequestAttributes +from datadog_api_client.v2.model.scalar_formula_request_queries import ScalarFormulaRequestQueries +from datadog_api_client.v2.model.scalar_formula_request_type import ScalarFormulaRequestType +from datadog_api_client.v2.model.slo_data_source import SloDataSource +from datadog_api_client.v2.model.slo_query import SloQuery +from 
datadog_api_client.v2.model.slos_group_mode import SlosGroupMode +from datadog_api_client.v2.model.slos_measure import SlosMeasure +from datadog_api_client.v2.model.slos_query_type import SlosQueryType + +body = ScalarFormulaQueryRequest( + data=ScalarFormulaRequest( + attributes=ScalarFormulaRequestAttributes( + formulas=[ + QueryFormula( + formula="a", + limit=FormulaLimit( + count=10, + order=QuerySortOrder.DESC, + ), + ), + ], + _from=1636625471000, + queries=ScalarFormulaRequestQueries( + [ + SloQuery( + data_source=SloDataSource.SLO, + name="a", + slo_id="12345678910", + measure=SlosMeasure.SLO_STATUS, + slo_query_type=SlosQueryType.METRIC, + group_mode=SlosGroupMode.OVERALL, + additional_query_filters="*", + ), + ] + ), + to=1636629071000, + ), + type=ScalarFormulaRequestType.SCALAR_REQUEST, + ), +) + +configuration = Configuration() +with ApiClient(configuration) as api_client: + api_instance = MetricsApi(api_client) + response = api_instance.query_scalar_data(body=body) + + print(response) diff --git a/examples/v2/metrics/QueryScalarData_2533499017.py b/examples/v2/metrics/QueryScalarData_2533499017.py new file mode 100644 index 0000000000..60b7b9f7f5 --- /dev/null +++ b/examples/v2/metrics/QueryScalarData_2533499017.py @@ -0,0 +1,58 @@ +""" +Scalar cross product query with apm_dependency_stats data source returns "OK" response +""" + +from datadog_api_client import ApiClient, Configuration +from datadog_api_client.v2.api.metrics_api import MetricsApi +from datadog_api_client.v2.model.apm_dependency_stat_name import ApmDependencyStatName +from datadog_api_client.v2.model.apm_dependency_stats_data_source import ApmDependencyStatsDataSource +from datadog_api_client.v2.model.apm_dependency_stats_query import ApmDependencyStatsQuery +from datadog_api_client.v2.model.formula_limit import FormulaLimit +from datadog_api_client.v2.model.query_formula import QueryFormula +from datadog_api_client.v2.model.query_sort_order import QuerySortOrder +from 
datadog_api_client.v2.model.scalar_formula_query_request import ScalarFormulaQueryRequest +from datadog_api_client.v2.model.scalar_formula_request import ScalarFormulaRequest +from datadog_api_client.v2.model.scalar_formula_request_attributes import ScalarFormulaRequestAttributes +from datadog_api_client.v2.model.scalar_formula_request_queries import ScalarFormulaRequestQueries +from datadog_api_client.v2.model.scalar_formula_request_type import ScalarFormulaRequestType + +body = ScalarFormulaQueryRequest( + data=ScalarFormulaRequest( + attributes=ScalarFormulaRequestAttributes( + formulas=[ + QueryFormula( + formula="a", + limit=FormulaLimit( + count=10, + order=QuerySortOrder.DESC, + ), + ), + ], + _from=1636625471000, + queries=ScalarFormulaRequestQueries( + [ + ApmDependencyStatsQuery( + data_source=ApmDependencyStatsDataSource.APM_DEPENDENCY_STATS, + name="a", + env="ci", + service="cassandra", + stat=ApmDependencyStatName.AVG_DURATION, + operation_name="cassandra.query", + resource_name="DELETE FROM monitor_history.monitor_state_change_history WHERE org_id = ? AND monitor_id IN ? 
AND group = ?", + primary_tag_name="datacenter", + primary_tag_value="edge-eu1.prod.dog", + ), + ] + ), + to=1636629071000, + ), + type=ScalarFormulaRequestType.SCALAR_REQUEST, + ), +) + +configuration = Configuration() +with ApiClient(configuration) as api_client: + api_instance = MetricsApi(api_client) + response = api_instance.query_scalar_data(body=body) + + print(response) diff --git a/examples/v2/metrics/QueryScalarData_4230617918.py b/examples/v2/metrics/QueryScalarData_4230617918.py new file mode 100644 index 0000000000..cd3793e18e --- /dev/null +++ b/examples/v2/metrics/QueryScalarData_4230617918.py @@ -0,0 +1,57 @@ +""" +Scalar cross product query with process data source returns "OK" response +""" + +from datadog_api_client import ApiClient, Configuration +from datadog_api_client.v2.api.metrics_api import MetricsApi +from datadog_api_client.v2.model.formula_limit import FormulaLimit +from datadog_api_client.v2.model.process_data_source import ProcessDataSource +from datadog_api_client.v2.model.process_scalar_query import ProcessScalarQuery +from datadog_api_client.v2.model.query_formula import QueryFormula +from datadog_api_client.v2.model.query_sort_order import QuerySortOrder +from datadog_api_client.v2.model.scalar_formula_query_request import ScalarFormulaQueryRequest +from datadog_api_client.v2.model.scalar_formula_request import ScalarFormulaRequest +from datadog_api_client.v2.model.scalar_formula_request_attributes import ScalarFormulaRequestAttributes +from datadog_api_client.v2.model.scalar_formula_request_queries import ScalarFormulaRequestQueries +from datadog_api_client.v2.model.scalar_formula_request_type import ScalarFormulaRequestType + +body = ScalarFormulaQueryRequest( + data=ScalarFormulaRequest( + attributes=ScalarFormulaRequestAttributes( + formulas=[ + QueryFormula( + formula="a", + limit=FormulaLimit( + count=10, + order=QuerySortOrder.DESC, + ), + ), + ], + _from=1636625471000, + queries=ScalarFormulaRequestQueries( + [ + 
ProcessScalarQuery( + data_source=ProcessDataSource.PROCESS, + name="a", + metric="process.stat.cpu.total_pct", + aggregator="avg", + text_filter="", + tag_filters=[], + limit=10, + sort="desc", + is_normalized_cpu=False, + ), + ] + ), + to=1636629071000, + ), + type=ScalarFormulaRequestType.SCALAR_REQUEST, + ), +) + +configuration = Configuration() +with ApiClient(configuration) as api_client: + api_instance = MetricsApi(api_client) + response = api_instance.query_scalar_data(body=body) + + print(response) diff --git a/examples/v2/metrics/QueryScalarData_891952130.py b/examples/v2/metrics/QueryScalarData_891952130.py new file mode 100644 index 0000000000..17af326c81 --- /dev/null +++ b/examples/v2/metrics/QueryScalarData_891952130.py @@ -0,0 +1,55 @@ +""" +Scalar cross product query with container data source returns "OK" response +""" + +from datadog_api_client import ApiClient, Configuration +from datadog_api_client.v2.api.metrics_api import MetricsApi +from datadog_api_client.v2.model.container_data_source import ContainerDataSource +from datadog_api_client.v2.model.container_scalar_query import ContainerScalarQuery +from datadog_api_client.v2.model.formula_limit import FormulaLimit +from datadog_api_client.v2.model.query_formula import QueryFormula +from datadog_api_client.v2.model.query_sort_order import QuerySortOrder +from datadog_api_client.v2.model.scalar_formula_query_request import ScalarFormulaQueryRequest +from datadog_api_client.v2.model.scalar_formula_request import ScalarFormulaRequest +from datadog_api_client.v2.model.scalar_formula_request_attributes import ScalarFormulaRequestAttributes +from datadog_api_client.v2.model.scalar_formula_request_queries import ScalarFormulaRequestQueries +from datadog_api_client.v2.model.scalar_formula_request_type import ScalarFormulaRequestType + +body = ScalarFormulaQueryRequest( + data=ScalarFormulaRequest( + attributes=ScalarFormulaRequestAttributes( + formulas=[ + QueryFormula( + formula="a", + 
limit=FormulaLimit( + count=10, + order=QuerySortOrder.DESC, + ), + ), + ], + _from=1636625471000, + queries=ScalarFormulaRequestQueries( + [ + ContainerScalarQuery( + data_source=ContainerDataSource.CONTAINER, + name="a", + metric="process.stat.container.cpu.system_pct", + aggregator="avg", + tag_filters=[], + limit=10, + sort="desc", + ), + ] + ), + to=1636629071000, + ), + type=ScalarFormulaRequestType.SCALAR_REQUEST, + ), +) + +configuration = Configuration() +with ApiClient(configuration) as api_client: + api_instance = MetricsApi(api_client) + response = api_instance.query_scalar_data(body=body) + + print(response) diff --git a/examples/v2/metrics/QueryTimeseriesData_108927825.py b/examples/v2/metrics/QueryTimeseriesData_108927825.py new file mode 100644 index 0000000000..393f204a83 --- /dev/null +++ b/examples/v2/metrics/QueryTimeseriesData_108927825.py @@ -0,0 +1,59 @@ +""" +Timeseries cross product query with slo data source returns "OK" response +""" + +from datadog_api_client import ApiClient, Configuration +from datadog_api_client.v2.api.metrics_api import MetricsApi +from datadog_api_client.v2.model.formula_limit import FormulaLimit +from datadog_api_client.v2.model.query_formula import QueryFormula +from datadog_api_client.v2.model.query_sort_order import QuerySortOrder +from datadog_api_client.v2.model.slo_data_source import SloDataSource +from datadog_api_client.v2.model.slo_query import SloQuery +from datadog_api_client.v2.model.slos_group_mode import SlosGroupMode +from datadog_api_client.v2.model.slos_measure import SlosMeasure +from datadog_api_client.v2.model.slos_query_type import SlosQueryType +from datadog_api_client.v2.model.timeseries_formula_query_request import TimeseriesFormulaQueryRequest +from datadog_api_client.v2.model.timeseries_formula_request import TimeseriesFormulaRequest +from datadog_api_client.v2.model.timeseries_formula_request_attributes import TimeseriesFormulaRequestAttributes +from 
datadog_api_client.v2.model.timeseries_formula_request_queries import TimeseriesFormulaRequestQueries +from datadog_api_client.v2.model.timeseries_formula_request_type import TimeseriesFormulaRequestType + +body = TimeseriesFormulaQueryRequest( + data=TimeseriesFormulaRequest( + attributes=TimeseriesFormulaRequestAttributes( + formulas=[ + QueryFormula( + formula="a", + limit=FormulaLimit( + count=10, + order=QuerySortOrder.DESC, + ), + ), + ], + _from=1636625471000, + interval=5000, + queries=TimeseriesFormulaRequestQueries( + [ + SloQuery( + data_source=SloDataSource.SLO, + name="a", + slo_id="12345678910", + measure=SlosMeasure.SLO_STATUS, + slo_query_type=SlosQueryType.METRIC, + group_mode=SlosGroupMode.OVERALL, + additional_query_filters="*", + ), + ] + ), + to=1636629071000, + ), + type=TimeseriesFormulaRequestType.TIMESERIES_REQUEST, + ), +) + +configuration = Configuration() +with ApiClient(configuration) as api_client: + api_instance = MetricsApi(api_client) + response = api_instance.query_timeseries_data(body=body) + + print(response) diff --git a/examples/v2/metrics/QueryTimeseriesData_2159746306.py b/examples/v2/metrics/QueryTimeseriesData_2159746306.py new file mode 100644 index 0000000000..2dc9ef7c41 --- /dev/null +++ b/examples/v2/metrics/QueryTimeseriesData_2159746306.py @@ -0,0 +1,57 @@ +""" +Timeseries cross product query with process data source returns "OK" response +""" + +from datadog_api_client import ApiClient, Configuration +from datadog_api_client.v2.api.metrics_api import MetricsApi +from datadog_api_client.v2.model.formula_limit import FormulaLimit +from datadog_api_client.v2.model.process_data_source import ProcessDataSource +from datadog_api_client.v2.model.process_timeseries_query import ProcessTimeseriesQuery +from datadog_api_client.v2.model.query_formula import QueryFormula +from datadog_api_client.v2.model.query_sort_order import QuerySortOrder +from datadog_api_client.v2.model.timeseries_formula_query_request import 
TimeseriesFormulaQueryRequest +from datadog_api_client.v2.model.timeseries_formula_request import TimeseriesFormulaRequest +from datadog_api_client.v2.model.timeseries_formula_request_attributes import TimeseriesFormulaRequestAttributes +from datadog_api_client.v2.model.timeseries_formula_request_queries import TimeseriesFormulaRequestQueries +from datadog_api_client.v2.model.timeseries_formula_request_type import TimeseriesFormulaRequestType + +body = TimeseriesFormulaQueryRequest( + data=TimeseriesFormulaRequest( + attributes=TimeseriesFormulaRequestAttributes( + formulas=[ + QueryFormula( + formula="a", + limit=FormulaLimit( + count=10, + order=QuerySortOrder.DESC, + ), + ), + ], + _from=1636625471000, + interval=5000, + queries=TimeseriesFormulaRequestQueries( + [ + ProcessTimeseriesQuery( + data_source=ProcessDataSource.PROCESS, + name="a", + metric="process.stat.cpu.total_pct", + text_filter="", + tag_filters=[], + limit=10, + sort="desc", + is_normalized_cpu=False, + ), + ] + ), + to=1636629071000, + ), + type=TimeseriesFormulaRequestType.TIMESERIES_REQUEST, + ), +) + +configuration = Configuration() +with ApiClient(configuration) as api_client: + api_instance = MetricsApi(api_client) + response = api_instance.query_timeseries_data(body=body) + + print(response) diff --git a/examples/v2/metrics/QueryTimeseriesData_3174309318.py b/examples/v2/metrics/QueryTimeseriesData_3174309318.py new file mode 100644 index 0000000000..0aedf169a9 --- /dev/null +++ b/examples/v2/metrics/QueryTimeseriesData_3174309318.py @@ -0,0 +1,55 @@ +""" +Timeseries cross product query with container data source returns "OK" response +""" + +from datadog_api_client import ApiClient, Configuration +from datadog_api_client.v2.api.metrics_api import MetricsApi +from datadog_api_client.v2.model.container_data_source import ContainerDataSource +from datadog_api_client.v2.model.container_timeseries_query import ContainerTimeseriesQuery +from datadog_api_client.v2.model.formula_limit import 
FormulaLimit +from datadog_api_client.v2.model.query_formula import QueryFormula +from datadog_api_client.v2.model.query_sort_order import QuerySortOrder +from datadog_api_client.v2.model.timeseries_formula_query_request import TimeseriesFormulaQueryRequest +from datadog_api_client.v2.model.timeseries_formula_request import TimeseriesFormulaRequest +from datadog_api_client.v2.model.timeseries_formula_request_attributes import TimeseriesFormulaRequestAttributes +from datadog_api_client.v2.model.timeseries_formula_request_queries import TimeseriesFormulaRequestQueries +from datadog_api_client.v2.model.timeseries_formula_request_type import TimeseriesFormulaRequestType + +body = TimeseriesFormulaQueryRequest( + data=TimeseriesFormulaRequest( + attributes=TimeseriesFormulaRequestAttributes( + formulas=[ + QueryFormula( + formula="a", + limit=FormulaLimit( + count=10, + order=QuerySortOrder.DESC, + ), + ), + ], + _from=1636625471000, + interval=5000, + queries=TimeseriesFormulaRequestQueries( + [ + ContainerTimeseriesQuery( + data_source=ContainerDataSource.CONTAINER, + name="a", + metric="process.stat.container.cpu.system_pct", + tag_filters=[], + limit=10, + sort="desc", + ), + ] + ), + to=1636629071000, + ), + type=TimeseriesFormulaRequestType.TIMESERIES_REQUEST, + ), +) + +configuration = Configuration() +with ApiClient(configuration) as api_client: + api_instance = MetricsApi(api_client) + response = api_instance.query_timeseries_data(body=body) + + print(response) diff --git a/examples/v2/metrics/QueryTimeseriesData_4028506518.py b/examples/v2/metrics/QueryTimeseriesData_4028506518.py new file mode 100644 index 0000000000..a9541707e5 --- /dev/null +++ b/examples/v2/metrics/QueryTimeseriesData_4028506518.py @@ -0,0 +1,61 @@ +""" +Timeseries cross product query with apm_resource_stats data source returns "OK" response +""" + +from datadog_api_client import ApiClient, Configuration +from datadog_api_client.v2.api.metrics_api import MetricsApi +from 
datadog_api_client.v2.model.apm_resource_stat_name import ApmResourceStatName +from datadog_api_client.v2.model.apm_resource_stats_data_source import ApmResourceStatsDataSource +from datadog_api_client.v2.model.apm_resource_stats_query import ApmResourceStatsQuery +from datadog_api_client.v2.model.formula_limit import FormulaLimit +from datadog_api_client.v2.model.query_formula import QueryFormula +from datadog_api_client.v2.model.query_sort_order import QuerySortOrder +from datadog_api_client.v2.model.timeseries_formula_query_request import TimeseriesFormulaQueryRequest +from datadog_api_client.v2.model.timeseries_formula_request import TimeseriesFormulaRequest +from datadog_api_client.v2.model.timeseries_formula_request_attributes import TimeseriesFormulaRequestAttributes +from datadog_api_client.v2.model.timeseries_formula_request_queries import TimeseriesFormulaRequestQueries +from datadog_api_client.v2.model.timeseries_formula_request_type import TimeseriesFormulaRequestType + +body = TimeseriesFormulaQueryRequest( + data=TimeseriesFormulaRequest( + attributes=TimeseriesFormulaRequestAttributes( + formulas=[ + QueryFormula( + formula="a", + limit=FormulaLimit( + count=10, + order=QuerySortOrder.DESC, + ), + ), + ], + _from=1636625471000, + interval=5000, + queries=TimeseriesFormulaRequestQueries( + [ + ApmResourceStatsQuery( + data_source=ApmResourceStatsDataSource.APM_RESOURCE_STATS, + name="a", + env="staging", + service="azure-bill-import", + stat=ApmResourceStatName.HITS, + operation_name="cassandra.query", + group_by=[ + "resource_name", + ], + primary_tag_name="datacenter", + primary_tag_value="*", + ), + ] + ), + to=1636629071000, + ), + type=TimeseriesFormulaRequestType.TIMESERIES_REQUEST, + ), +) + +configuration = Configuration() +with ApiClient(configuration) as api_client: + api_instance = MetricsApi(api_client) + response = api_instance.query_timeseries_data(body=body) + + print(response) diff --git 
a/examples/v2/metrics/QueryTimeseriesData_4246412951.py b/examples/v2/metrics/QueryTimeseriesData_4246412951.py new file mode 100644 index 0000000000..b21021af22 --- /dev/null +++ b/examples/v2/metrics/QueryTimeseriesData_4246412951.py @@ -0,0 +1,58 @@ +""" +Timeseries cross product query with apm_metrics data source returns "OK" response +""" + +from datadog_api_client import ApiClient, Configuration +from datadog_api_client.v2.api.metrics_api import MetricsApi +from datadog_api_client.v2.model.apm_metrics_data_source import ApmMetricsDataSource +from datadog_api_client.v2.model.apm_metrics_query import ApmMetricsQuery +from datadog_api_client.v2.model.apm_metrics_stat import ApmMetricsStat +from datadog_api_client.v2.model.formula_limit import FormulaLimit +from datadog_api_client.v2.model.query_formula import QueryFormula +from datadog_api_client.v2.model.query_sort_order import QuerySortOrder +from datadog_api_client.v2.model.timeseries_formula_query_request import TimeseriesFormulaQueryRequest +from datadog_api_client.v2.model.timeseries_formula_request import TimeseriesFormulaRequest +from datadog_api_client.v2.model.timeseries_formula_request_attributes import TimeseriesFormulaRequestAttributes +from datadog_api_client.v2.model.timeseries_formula_request_queries import TimeseriesFormulaRequestQueries +from datadog_api_client.v2.model.timeseries_formula_request_type import TimeseriesFormulaRequestType + +body = TimeseriesFormulaQueryRequest( + data=TimeseriesFormulaRequest( + attributes=TimeseriesFormulaRequestAttributes( + formulas=[ + QueryFormula( + formula="a", + limit=FormulaLimit( + count=10, + order=QuerySortOrder.DESC, + ), + ), + ], + _from=1636625471000, + interval=5000, + queries=TimeseriesFormulaRequestQueries( + [ + ApmMetricsQuery( + data_source=ApmMetricsDataSource.APM_METRICS, + name="a", + stat=ApmMetricsStat.HITS, + service="web-store", + query_filter="env:prod", + group_by=[ + "resource_name", + ], + ), + ] + ), + to=1636629071000, + ), + 
type=TimeseriesFormulaRequestType.TIMESERIES_REQUEST, + ), +) + +configuration = Configuration() +with ApiClient(configuration) as api_client: + api_instance = MetricsApi(api_client) + response = api_instance.query_timeseries_data(body=body) + + print(response) diff --git a/examples/v2/metrics/QueryTimeseriesData_847716941.py b/examples/v2/metrics/QueryTimeseriesData_847716941.py new file mode 100644 index 0000000000..a90bea00f2 --- /dev/null +++ b/examples/v2/metrics/QueryTimeseriesData_847716941.py @@ -0,0 +1,59 @@ +""" +Timeseries cross product query with apm_dependency_stats data source returns "OK" response +""" + +from datadog_api_client import ApiClient, Configuration +from datadog_api_client.v2.api.metrics_api import MetricsApi +from datadog_api_client.v2.model.apm_dependency_stat_name import ApmDependencyStatName +from datadog_api_client.v2.model.apm_dependency_stats_data_source import ApmDependencyStatsDataSource +from datadog_api_client.v2.model.apm_dependency_stats_query import ApmDependencyStatsQuery +from datadog_api_client.v2.model.formula_limit import FormulaLimit +from datadog_api_client.v2.model.query_formula import QueryFormula +from datadog_api_client.v2.model.query_sort_order import QuerySortOrder +from datadog_api_client.v2.model.timeseries_formula_query_request import TimeseriesFormulaQueryRequest +from datadog_api_client.v2.model.timeseries_formula_request import TimeseriesFormulaRequest +from datadog_api_client.v2.model.timeseries_formula_request_attributes import TimeseriesFormulaRequestAttributes +from datadog_api_client.v2.model.timeseries_formula_request_queries import TimeseriesFormulaRequestQueries +from datadog_api_client.v2.model.timeseries_formula_request_type import TimeseriesFormulaRequestType + +body = TimeseriesFormulaQueryRequest( + data=TimeseriesFormulaRequest( + attributes=TimeseriesFormulaRequestAttributes( + formulas=[ + QueryFormula( + formula="a", + limit=FormulaLimit( + count=10, + order=QuerySortOrder.DESC, + ), + ), 
+ ], + _from=1636625471000, + interval=5000, + queries=TimeseriesFormulaRequestQueries( + [ + ApmDependencyStatsQuery( + data_source=ApmDependencyStatsDataSource.APM_DEPENDENCY_STATS, + name="a", + env="ci", + service="cassandra", + stat=ApmDependencyStatName.AVG_DURATION, + operation_name="cassandra.query", + resource_name="DELETE FROM monitor_history.monitor_state_change_history WHERE org_id = ? AND monitor_id IN ? AND group = ?", + primary_tag_name="datacenter", + primary_tag_value="edge-eu1.prod.dog", + ), + ] + ), + to=1636629071000, + ), + type=TimeseriesFormulaRequestType.TIMESERIES_REQUEST, + ), +) + +configuration = Configuration() +with ApiClient(configuration) as api_client: + api_instance = MetricsApi(api_client) + response = api_instance.query_timeseries_data(body=body) + + print(response) diff --git a/src/datadog_api_client/v2/model/apm_dependency_stat_name.py b/src/datadog_api_client/v2/model/apm_dependency_stat_name.py new file mode 100644 index 0000000000..e18e51a383 --- /dev/null +++ b/src/datadog_api_client/v2/model/apm_dependency_stat_name.py @@ -0,0 +1,53 @@ +# Unless explicitly stated otherwise all files in this repository are licensed under the Apache-2.0 License. +# This product includes software developed at Datadog (https://www.datadoghq.com/). +# Copyright 2019-Present Datadog, Inc. +from __future__ import annotations + + +from datadog_api_client.model_utils import ( + ModelSimple, + cached_property, +) + +from typing import ClassVar + + +class ApmDependencyStatName(ModelSimple): + """ + The APM dependency statistic to query. + + :param value: Must be one of ["avg_duration", "avg_root_duration", "avg_spans_per_trace", "error_rate", "pct_exec_time", "pct_of_traces", "total_traces_count"]. 
+ :type value: str + """ + + allowed_values = { + "avg_duration", + "avg_root_duration", + "avg_spans_per_trace", + "error_rate", + "pct_exec_time", + "pct_of_traces", + "total_traces_count", + } + AVG_DURATION: ClassVar["ApmDependencyStatName"] + AVG_ROOT_DURATION: ClassVar["ApmDependencyStatName"] + AVG_SPANS_PER_TRACE: ClassVar["ApmDependencyStatName"] + ERROR_RATE: ClassVar["ApmDependencyStatName"] + PCT_EXEC_TIME: ClassVar["ApmDependencyStatName"] + PCT_OF_TRACES: ClassVar["ApmDependencyStatName"] + TOTAL_TRACES_COUNT: ClassVar["ApmDependencyStatName"] + + @cached_property + def openapi_types(_): + return { + "value": (str,), + } + + +ApmDependencyStatName.AVG_DURATION = ApmDependencyStatName("avg_duration") +ApmDependencyStatName.AVG_ROOT_DURATION = ApmDependencyStatName("avg_root_duration") +ApmDependencyStatName.AVG_SPANS_PER_TRACE = ApmDependencyStatName("avg_spans_per_trace") +ApmDependencyStatName.ERROR_RATE = ApmDependencyStatName("error_rate") +ApmDependencyStatName.PCT_EXEC_TIME = ApmDependencyStatName("pct_exec_time") +ApmDependencyStatName.PCT_OF_TRACES = ApmDependencyStatName("pct_of_traces") +ApmDependencyStatName.TOTAL_TRACES_COUNT = ApmDependencyStatName("total_traces_count") diff --git a/src/datadog_api_client/v2/model/apm_dependency_stats_data_source.py b/src/datadog_api_client/v2/model/apm_dependency_stats_data_source.py new file mode 100644 index 0000000000..76102aae96 --- /dev/null +++ b/src/datadog_api_client/v2/model/apm_dependency_stats_data_source.py @@ -0,0 +1,35 @@ +# Unless explicitly stated otherwise all files in this repository are licensed under the Apache-2.0 License. +# This product includes software developed at Datadog (https://www.datadoghq.com/). +# Copyright 2019-Present Datadog, Inc. 
+from __future__ import annotations + + +from datadog_api_client.model_utils import ( + ModelSimple, + cached_property, +) + +from typing import ClassVar + + +class ApmDependencyStatsDataSource(ModelSimple): + """ + A data source for APM dependency statistics queries. + + :param value: If omitted defaults to "apm_dependency_stats". Must be one of ["apm_dependency_stats"]. + :type value: str + """ + + allowed_values = { + "apm_dependency_stats", + } + APM_DEPENDENCY_STATS: ClassVar["ApmDependencyStatsDataSource"] + + @cached_property + def openapi_types(_): + return { + "value": (str,), + } + + +ApmDependencyStatsDataSource.APM_DEPENDENCY_STATS = ApmDependencyStatsDataSource("apm_dependency_stats") diff --git a/src/datadog_api_client/v2/model/apm_dependency_stats_query.py b/src/datadog_api_client/v2/model/apm_dependency_stats_query.py new file mode 100644 index 0000000000..b7b51be0d8 --- /dev/null +++ b/src/datadog_api_client/v2/model/apm_dependency_stats_query.py @@ -0,0 +1,114 @@ +# Unless explicitly stated otherwise all files in this repository are licensed under the Apache-2.0 License. +# This product includes software developed at Datadog (https://www.datadoghq.com/). +# Copyright 2019-Present Datadog, Inc. 
+from __future__ import annotations + +from typing import Union, TYPE_CHECKING + +from datadog_api_client.model_utils import ( + ModelNormal, + cached_property, + unset, + UnsetType, +) + + +if TYPE_CHECKING: + from datadog_api_client.v2.model.apm_dependency_stats_data_source import ApmDependencyStatsDataSource + from datadog_api_client.v2.model.apm_dependency_stat_name import ApmDependencyStatName + + +class ApmDependencyStatsQuery(ModelNormal): + @cached_property + def openapi_types(_): + from datadog_api_client.v2.model.apm_dependency_stats_data_source import ApmDependencyStatsDataSource + from datadog_api_client.v2.model.apm_dependency_stat_name import ApmDependencyStatName + + return { + "data_source": (ApmDependencyStatsDataSource,), + "env": (str,), + "is_upstream": (bool,), + "name": (str,), + "operation_name": (str,), + "primary_tag_name": (str,), + "primary_tag_value": (str,), + "resource_name": (str,), + "service": (str,), + "stat": (ApmDependencyStatName,), + } + + attribute_map = { + "data_source": "data_source", + "env": "env", + "is_upstream": "is_upstream", + "name": "name", + "operation_name": "operation_name", + "primary_tag_name": "primary_tag_name", + "primary_tag_value": "primary_tag_value", + "resource_name": "resource_name", + "service": "service", + "stat": "stat", + } + + def __init__( + self_, + data_source: ApmDependencyStatsDataSource, + env: str, + name: str, + operation_name: str, + resource_name: str, + service: str, + stat: ApmDependencyStatName, + is_upstream: Union[bool, UnsetType] = unset, + primary_tag_name: Union[str, UnsetType] = unset, + primary_tag_value: Union[str, UnsetType] = unset, + **kwargs, + ): + """ + An individual APM dependency stats query. + + :param data_source: A data source for APM dependency statistics queries. + :type data_source: ApmDependencyStatsDataSource + + :param env: The environment to query. 
+ :type env: str + + :param is_upstream: Determines whether stats for upstream or downstream dependencies should be queried. + :type is_upstream: bool, optional + + :param name: The variable name for use in formulas. + :type name: str + + :param operation_name: The APM operation name. + :type operation_name: str + + :param primary_tag_name: The name of the second primary tag used within APM; required when ``primary_tag_value`` is specified. See https://docs.datadoghq.com/tracing/guide/setting_primary_tags_to_scope/#add-a-second-primary-tag-in-datadog. + :type primary_tag_name: str, optional + + :param primary_tag_value: Filter APM data by the second primary tag. ``primary_tag_name`` must also be specified. + :type primary_tag_value: str, optional + + :param resource_name: The resource name to filter by. + :type resource_name: str + + :param service: The service name to filter by. + :type service: str + + :param stat: The APM dependency statistic to query. + :type stat: ApmDependencyStatName + """ + if is_upstream is not unset: + kwargs["is_upstream"] = is_upstream + if primary_tag_name is not unset: + kwargs["primary_tag_name"] = primary_tag_name + if primary_tag_value is not unset: + kwargs["primary_tag_value"] = primary_tag_value + super().__init__(kwargs) + + self_.data_source = data_source + self_.env = env + self_.name = name + self_.operation_name = operation_name + self_.resource_name = resource_name + self_.service = service + self_.stat = stat diff --git a/src/datadog_api_client/v2/model/apm_metrics_data_source.py b/src/datadog_api_client/v2/model/apm_metrics_data_source.py new file mode 100644 index 0000000000..d9c47a9f7b --- /dev/null +++ b/src/datadog_api_client/v2/model/apm_metrics_data_source.py @@ -0,0 +1,35 @@ +# Unless explicitly stated otherwise all files in this repository are licensed under the Apache-2.0 License. +# This product includes software developed at Datadog (https://www.datadoghq.com/). +# Copyright 2019-Present Datadog, Inc. 
+from __future__ import annotations + + +from datadog_api_client.model_utils import ( + ModelSimple, + cached_property, +) + +from typing import ClassVar + + +class ApmMetricsDataSource(ModelSimple): + """ + A data source for APM metrics queries. + + :param value: If omitted defaults to "apm_metrics". Must be one of ["apm_metrics"]. + :type value: str + """ + + allowed_values = { + "apm_metrics", + } + APM_METRICS: ClassVar["ApmMetricsDataSource"] + + @cached_property + def openapi_types(_): + return { + "value": (str,), + } + + +ApmMetricsDataSource.APM_METRICS = ApmMetricsDataSource("apm_metrics") diff --git a/src/datadog_api_client/v2/model/apm_metrics_query.py b/src/datadog_api_client/v2/model/apm_metrics_query.py new file mode 100644 index 0000000000..724348b626 --- /dev/null +++ b/src/datadog_api_client/v2/model/apm_metrics_query.py @@ -0,0 +1,134 @@ +# Unless explicitly stated otherwise all files in this repository are licensed under the Apache-2.0 License. +# This product includes software developed at Datadog (https://www.datadoghq.com/). +# Copyright 2019-Present Datadog, Inc. 
+from __future__ import annotations + +from typing import List, Union, TYPE_CHECKING + +from datadog_api_client.model_utils import ( + ModelNormal, + cached_property, + unset, + UnsetType, +) + + +if TYPE_CHECKING: + from datadog_api_client.v2.model.apm_metrics_data_source import ApmMetricsDataSource + from datadog_api_client.v2.model.apm_metrics_stat import ApmMetricsStat + + +class ApmMetricsQuery(ModelNormal): + @cached_property + def openapi_types(_): + from datadog_api_client.v2.model.apm_metrics_data_source import ApmMetricsDataSource + from datadog_api_client.v2.model.apm_metrics_stat import ApmMetricsStat + + return { + "data_source": (ApmMetricsDataSource,), + "group_by": ([str],), + "name": (str,), + "operation_mode": (str,), + "operation_name": (str,), + "peer_tags": ([str],), + "query_filter": (str,), + "resource_hash": (str,), + "resource_name": (str,), + "service": (str,), + "span_kind": (str,), + "stat": (ApmMetricsStat,), + } + + attribute_map = { + "data_source": "data_source", + "group_by": "group_by", + "name": "name", + "operation_mode": "operation_mode", + "operation_name": "operation_name", + "peer_tags": "peer_tags", + "query_filter": "query_filter", + "resource_hash": "resource_hash", + "resource_name": "resource_name", + "service": "service", + "span_kind": "span_kind", + "stat": "stat", + } + + def __init__( + self_, + data_source: ApmMetricsDataSource, + name: str, + stat: ApmMetricsStat, + group_by: Union[List[str], UnsetType] = unset, + operation_mode: Union[str, UnsetType] = unset, + operation_name: Union[str, UnsetType] = unset, + peer_tags: Union[List[str], UnsetType] = unset, + query_filter: Union[str, UnsetType] = unset, + resource_hash: Union[str, UnsetType] = unset, + resource_name: Union[str, UnsetType] = unset, + service: Union[str, UnsetType] = unset, + span_kind: Union[str, UnsetType] = unset, + **kwargs, + ): + """ + An individual APM metrics query. + + :param data_source: A data source for APM metrics queries. 
+ :type data_source: ApmMetricsDataSource + + :param group_by: Optional fields to group the query results by. + :type group_by: [str], optional + + :param name: The variable name for use in formulas. + :type name: str + + :param operation_mode: Optional operation mode to aggregate across operation names. + :type operation_mode: str, optional + + :param operation_name: Name of operation on service. If not provided, the primary operation name is used. + :type operation_name: str, optional + + :param peer_tags: Tags to query for a specific downstream entity (peer.service, peer.db_instance, peer.s3, peer.s3.bucket, etc.). + :type peer_tags: [str], optional + + :param query_filter: Additional filters for the query using metrics query syntax (e.g., env, primary_tag). + :type query_filter: str, optional + + :param resource_hash: The resource hash for exact matching. + :type resource_hash: str, optional + + :param resource_name: The full name of a specific resource to filter by. + :type resource_name: str, optional + + :param service: The service name to filter by. + :type service: str, optional + + :param span_kind: Describes the relationship between the span, its parents, and its children in a trace. Known values include consumer, server, client, producer, internal. + :type span_kind: str, optional + + :param stat: The APM metric statistic to query. 
class ApmMetricsStat(ModelSimple):
    """
    The APM metric statistic to query.

    :param value: Must be one of ["error_rate", "errors", "errors_per_second", "hits", "hits_per_second", "apdex", "latency_avg", "latency_max", "latency_p50", "latency_p75", "latency_p90", "latency_p95", "latency_p99", "latency_p999", "latency_distribution", "total_time"].
    :type value: str
    """

    # Wire values this enum accepts.
    allowed_values = {
        "error_rate",
        "errors",
        "errors_per_second",
        "hits",
        "hits_per_second",
        "apdex",
        "latency_avg",
        "latency_max",
        "latency_p50",
        "latency_p75",
        "latency_p90",
        "latency_p95",
        "latency_p99",
        "latency_p999",
        "latency_distribution",
        "total_time",
    }
    ERROR_RATE: ClassVar["ApmMetricsStat"]
    ERRORS: ClassVar["ApmMetricsStat"]
    ERRORS_PER_SECOND: ClassVar["ApmMetricsStat"]
    HITS: ClassVar["ApmMetricsStat"]
    HITS_PER_SECOND: ClassVar["ApmMetricsStat"]
    APDEX: ClassVar["ApmMetricsStat"]
    LATENCY_AVG: ClassVar["ApmMetricsStat"]
    LATENCY_MAX: ClassVar["ApmMetricsStat"]
    LATENCY_P50: ClassVar["ApmMetricsStat"]
    LATENCY_P75: ClassVar["ApmMetricsStat"]
    LATENCY_P90: ClassVar["ApmMetricsStat"]
    LATENCY_P95: ClassVar["ApmMetricsStat"]
    LATENCY_P99: ClassVar["ApmMetricsStat"]
    LATENCY_P999: ClassVar["ApmMetricsStat"]
    LATENCY_DISTRIBUTION: ClassVar["ApmMetricsStat"]
    TOTAL_TIME: ClassVar["ApmMetricsStat"]

    @cached_property
    def openapi_types(_):
        # The enum is serialized as a bare string.
        return {"value": (str,)}


# Materialize one singleton per allowed value; each constant name is the
# upper-cased wire value (e.g. "error_rate" -> ERROR_RATE).
for _value in sorted(ApmMetricsStat.allowed_values):
    setattr(ApmMetricsStat, _value.upper(), ApmMetricsStat(_value))
del _value
class ApmResourceStatName(ModelSimple):
    """
    The APM resource statistic to query.

    :param value: Must be one of ["error_rate", "errors", "hits", "latency_avg", "latency_max", "latency_p50", "latency_p75", "latency_p90", "latency_p95", "latency_p99", "latency_distribution", "total_time"].
    :type value: str
    """

    # Wire values this enum accepts.
    allowed_values = {
        "error_rate",
        "errors",
        "hits",
        "latency_avg",
        "latency_max",
        "latency_p50",
        "latency_p75",
        "latency_p90",
        "latency_p95",
        "latency_p99",
        "latency_distribution",
        "total_time",
    }
    ERROR_RATE: ClassVar["ApmResourceStatName"]
    ERRORS: ClassVar["ApmResourceStatName"]
    HITS: ClassVar["ApmResourceStatName"]
    LATENCY_AVG: ClassVar["ApmResourceStatName"]
    LATENCY_MAX: ClassVar["ApmResourceStatName"]
    LATENCY_P50: ClassVar["ApmResourceStatName"]
    LATENCY_P75: ClassVar["ApmResourceStatName"]
    LATENCY_P90: ClassVar["ApmResourceStatName"]
    LATENCY_P95: ClassVar["ApmResourceStatName"]
    LATENCY_P99: ClassVar["ApmResourceStatName"]
    LATENCY_DISTRIBUTION: ClassVar["ApmResourceStatName"]
    TOTAL_TIME: ClassVar["ApmResourceStatName"]

    @cached_property
    def openapi_types(_):
        # The enum is serialized as a bare string.
        return {"value": (str,)}


# One singleton per allowed value; constant names are the upper-cased wire values.
for _value in sorted(ApmResourceStatName.allowed_values):
    setattr(ApmResourceStatName, _value.upper(), ApmResourceStatName(_value))
del _value
class ApmResourceStatsDataSource(ModelSimple):
    """
    A data source for APM resource statistics queries.

    :param value: If omitted defaults to "apm_resource_stats". Must be one of ["apm_resource_stats"].
    :type value: str
    """

    # Single-valued enum: the only legal wire value.
    allowed_values = {"apm_resource_stats"}
    APM_RESOURCE_STATS: ClassVar["ApmResourceStatsDataSource"]

    @cached_property
    def openapi_types(_):
        # The enum is serialized as a bare string.
        return {"value": (str,)}


ApmResourceStatsDataSource.APM_RESOURCE_STATS = ApmResourceStatsDataSource("apm_resource_stats")
class ApmResourceStatsQuery(ModelNormal):
    @cached_property
    def openapi_types(_):
        from datadog_api_client.v2.model.apm_resource_stats_data_source import ApmResourceStatsDataSource
        from datadog_api_client.v2.model.apm_resource_stat_name import ApmResourceStatName

        return {
            "data_source": (ApmResourceStatsDataSource,),
            "env": (str,),
            "group_by": ([str],),
            "name": (str,),
            "operation_name": (str,),
            "primary_tag_name": (str,),
            "primary_tag_value": (str,),
            "resource_name": (str,),
            "service": (str,),
            "stat": (ApmResourceStatName,),
        }

    # Every attribute keeps its own name on the wire.
    attribute_map = {
        key: key
        for key in (
            "data_source",
            "env",
            "group_by",
            "name",
            "operation_name",
            "primary_tag_name",
            "primary_tag_value",
            "resource_name",
            "service",
            "stat",
        )
    }

    def __init__(
        self_,
        data_source: ApmResourceStatsDataSource,
        env: str,
        name: str,
        service: str,
        stat: ApmResourceStatName,
        group_by: Union[List[str], UnsetType] = unset,
        operation_name: Union[str, UnsetType] = unset,
        primary_tag_name: Union[str, UnsetType] = unset,
        primary_tag_value: Union[str, UnsetType] = unset,
        resource_name: Union[str, UnsetType] = unset,
        **kwargs,
    ):
        """
        An individual APM resource stats query.

        :param data_source: A data source for APM resource statistics queries.
        :type data_source: ApmResourceStatsDataSource

        :param env: The environment to query.
        :type env: str

        :param group_by: Tag keys to group results by.
        :type group_by: [str], optional

        :param name: The variable name for use in formulas.
        :type name: str

        :param operation_name: The APM operation name.
        :type operation_name: str, optional

        :param primary_tag_name: Name of the second primary tag used within APM. Required when ``primary_tag_value`` is specified. See https://docs.datadoghq.com/tracing/guide/setting_primary_tags_to_scope/#add-a-second-primary-tag-in-datadog
        :type primary_tag_name: str, optional

        :param primary_tag_value: Value of the second primary tag by which to filter APM data. ``primary_tag_name`` must also be specified.
        :type primary_tag_value: str, optional

        :param resource_name: The resource name to filter by.
        :type resource_name: str, optional

        :param service: The service name to filter by.
        :type service: str

        :param stat: The APM resource statistic to query.
        :type stat: ApmResourceStatName
        """
        # Forward only the optional fields the caller actually provided.
        optional_fields = {
            "group_by": group_by,
            "operation_name": operation_name,
            "primary_tag_name": primary_tag_name,
            "primary_tag_value": primary_tag_value,
            "resource_name": resource_name,
        }
        kwargs.update({key: value for key, value in optional_fields.items() if value is not unset})
        super().__init__(kwargs)

        self_.data_source = data_source
        self_.env = env
        self_.name = name
        self_.service = service
        self_.stat = stat
class ContainerDataSource(ModelSimple):
    """
    A data source for container-level infrastructure metrics.

    :param value: If omitted defaults to "container". Must be one of ["container"].
    :type value: str
    """

    # Single-valued enum: the only legal wire value.
    allowed_values = {"container"}
    CONTAINER: ClassVar["ContainerDataSource"]

    @cached_property
    def openapi_types(_):
        # The enum is serialized as a bare string.
        return {"value": (str,)}


ContainerDataSource.CONTAINER = ContainerDataSource("container")
class ContainerScalarQuery(ModelNormal):
    @cached_property
    def openapi_types(_):
        from datadog_api_client.v2.model.container_data_source import ContainerDataSource

        return {
            "aggregator": (str,),
            "data_source": (ContainerDataSource,),
            "is_normalized_cpu": (bool,),
            "limit": (int,),
            "metric": (str,),
            "name": (str,),
            "sort": (str,),
            "tag_filters": ([str],),
            "text_filter": (str,),
        }

    # Every attribute keeps its own name on the wire.
    attribute_map = {
        key: key
        for key in (
            "aggregator",
            "data_source",
            "is_normalized_cpu",
            "limit",
            "metric",
            "name",
            "sort",
            "tag_filters",
            "text_filter",
        )
    }

    def __init__(
        self_,
        data_source: ContainerDataSource,
        metric: str,
        name: str,
        aggregator: Union[str, UnsetType] = unset,
        is_normalized_cpu: Union[bool, UnsetType] = unset,
        limit: Union[int, UnsetType] = unset,
        sort: Union[str, UnsetType] = unset,
        tag_filters: Union[List[str], UnsetType] = unset,
        text_filter: Union[str, UnsetType] = unset,
        **kwargs,
    ):
        """
        An individual scalar container query.

        :param aggregator: The temporal reduction function to apply.
        :type aggregator: str, optional

        :param data_source: A data source for container-level infrastructure metrics.
        :type data_source: ContainerDataSource

        :param is_normalized_cpu: Whether CPU metrics should be normalized by core count.
        :type is_normalized_cpu: bool, optional

        :param limit: Maximum number of timeseries to return.
        :type limit: int, optional

        :param metric: The container metric to query.
        :type metric: str

        :param name: The variable name for use in formulas.
        :type name: str

        :param sort: Sort order for the results.
        :type sort: str, optional

        :param tag_filters: Tag filters to narrow down containers.
        :type tag_filters: [str], optional

        :param text_filter: A full-text search filter to match container names.
        :type text_filter: str, optional
        """
        # Forward only the optional fields the caller actually provided.
        optional_fields = {
            "aggregator": aggregator,
            "is_normalized_cpu": is_normalized_cpu,
            "limit": limit,
            "sort": sort,
            "tag_filters": tag_filters,
            "text_filter": text_filter,
        }
        kwargs.update({key: value for key, value in optional_fields.items() if value is not unset})
        super().__init__(kwargs)

        self_.data_source = data_source
        self_.metric = metric
        self_.name = name
class ContainerTimeseriesQuery(ModelNormal):
    @cached_property
    def openapi_types(_):
        from datadog_api_client.v2.model.container_data_source import ContainerDataSource

        return {
            "data_source": (ContainerDataSource,),
            "is_normalized_cpu": (bool,),
            "limit": (int,),
            "metric": (str,),
            "name": (str,),
            "sort": (str,),
            "tag_filters": ([str],),
            "text_filter": (str,),
        }

    # Every attribute keeps its own name on the wire.
    attribute_map = {
        key: key
        for key in (
            "data_source",
            "is_normalized_cpu",
            "limit",
            "metric",
            "name",
            "sort",
            "tag_filters",
            "text_filter",
        )
    }

    def __init__(
        self_,
        data_source: ContainerDataSource,
        metric: str,
        name: str,
        is_normalized_cpu: Union[bool, UnsetType] = unset,
        limit: Union[int, UnsetType] = unset,
        sort: Union[str, UnsetType] = unset,
        tag_filters: Union[List[str], UnsetType] = unset,
        text_filter: Union[str, UnsetType] = unset,
        **kwargs,
    ):
        """
        An individual timeseries container query.

        :param data_source: A data source for container-level infrastructure metrics.
        :type data_source: ContainerDataSource

        :param is_normalized_cpu: Whether CPU metrics should be normalized by core count.
        :type is_normalized_cpu: bool, optional

        :param limit: Maximum number of timeseries to return.
        :type limit: int, optional

        :param metric: The container metric to query.
        :type metric: str

        :param name: The variable name for use in formulas.
        :type name: str

        :param sort: Sort order for the results.
        :type sort: str, optional

        :param tag_filters: Tag filters to narrow down containers.
        :type tag_filters: [str], optional

        :param text_filter: A full-text search filter to match container names.
        :type text_filter: str, optional
        """
        # Forward only the optional fields the caller actually provided.
        optional_fields = {
            "is_normalized_cpu": is_normalized_cpu,
            "limit": limit,
            "sort": sort,
            "tag_filters": tag_filters,
            "text_filter": text_filter,
        }
        kwargs.update({key: value for key, value in optional_fields.items() if value is not unset})
        super().__init__(kwargs)

        self_.data_source = data_source
        self_.metric = metric
        self_.name = name
class EventsDataSource(ModelSimple):
    """
    A data source that is powered by the Events Platform.

    :param value: If omitted defaults to "logs". Must be one of ["logs", "spans", "network", "rum", "security_signals", "profiles", "audit", "events", "ci_tests", "ci_pipelines", "incident_analytics", "product_analytics", "on_call_events", "dora"].
    :type value: str
    """

    # Wire values this enum accepts.
    allowed_values = {
        "logs",
        "spans",
        "network",
        "rum",
        "security_signals",
        "profiles",
        "audit",
        "events",
        "ci_tests",
        "ci_pipelines",
        "incident_analytics",
        "product_analytics",
        "on_call_events",
        "dora",
    }
    LOGS: ClassVar["EventsDataSource"]
    SPANS: ClassVar["EventsDataSource"]
    NETWORK: ClassVar["EventsDataSource"]
    RUM: ClassVar["EventsDataSource"]
    SECURITY_SIGNALS: ClassVar["EventsDataSource"]
    PROFILES: ClassVar["EventsDataSource"]
    AUDIT: ClassVar["EventsDataSource"]
    EVENTS: ClassVar["EventsDataSource"]
    CI_TESTS: ClassVar["EventsDataSource"]
    CI_PIPELINES: ClassVar["EventsDataSource"]
    INCIDENT_ANALYTICS: ClassVar["EventsDataSource"]
    PRODUCT_ANALYTICS: ClassVar["EventsDataSource"]
    ON_CALL_EVENTS: ClassVar["EventsDataSource"]
    DORA: ClassVar["EventsDataSource"]

    @cached_property
    def openapi_types(_):
        # The enum is serialized as a bare string.
        return {"value": (str,)}


# One singleton per allowed value; constant names are the upper-cased wire values.
for _value in sorted(EventsDataSource.allowed_values):
    setattr(EventsDataSource, _value.upper(), EventsDataSource(_value))
del _value
class ProcessDataSource(ModelSimple):
    """
    A data source for process-level infrastructure metrics.

    :param value: If omitted defaults to "process". Must be one of ["process"].
    :type value: str
    """

    # Single-valued enum: the only legal wire value.
    allowed_values = {"process"}
    PROCESS: ClassVar["ProcessDataSource"]

    @cached_property
    def openapi_types(_):
        # The enum is serialized as a bare string.
        return {"value": (str,)}


ProcessDataSource.PROCESS = ProcessDataSource("process")
class ProcessScalarQuery(ModelNormal):
    @cached_property
    def openapi_types(_):
        from datadog_api_client.v2.model.process_data_source import ProcessDataSource

        return {
            "aggregator": (str,),
            "data_source": (ProcessDataSource,),
            "is_normalized_cpu": (bool,),
            "limit": (int,),
            "metric": (str,),
            "name": (str,),
            "sort": (str,),
            "tag_filters": ([str],),
            "text_filter": (str,),
        }

    # Every attribute keeps its own name on the wire.
    attribute_map = {
        key: key
        for key in (
            "aggregator",
            "data_source",
            "is_normalized_cpu",
            "limit",
            "metric",
            "name",
            "sort",
            "tag_filters",
            "text_filter",
        )
    }

    def __init__(
        self_,
        data_source: ProcessDataSource,
        metric: str,
        name: str,
        aggregator: Union[str, UnsetType] = unset,
        is_normalized_cpu: Union[bool, UnsetType] = unset,
        limit: Union[int, UnsetType] = unset,
        sort: Union[str, UnsetType] = unset,
        tag_filters: Union[List[str], UnsetType] = unset,
        text_filter: Union[str, UnsetType] = unset,
        **kwargs,
    ):
        """
        An individual scalar process query.

        :param aggregator: The temporal reduction function to apply.
        :type aggregator: str, optional

        :param data_source: A data source for process-level infrastructure metrics.
        :type data_source: ProcessDataSource

        :param is_normalized_cpu: Whether CPU metrics should be normalized by core count.
        :type is_normalized_cpu: bool, optional

        :param limit: Maximum number of timeseries to return.
        :type limit: int, optional

        :param metric: The process metric to query.
        :type metric: str

        :param name: The variable name for use in formulas.
        :type name: str

        :param sort: Sort order for the results.
        :type sort: str, optional

        :param tag_filters: Tag filters to narrow down processes.
        :type tag_filters: [str], optional

        :param text_filter: A full-text search filter to match process names or commands.
        :type text_filter: str, optional
        """
        # Forward only the optional fields the caller actually provided.
        optional_fields = {
            "aggregator": aggregator,
            "is_normalized_cpu": is_normalized_cpu,
            "limit": limit,
            "sort": sort,
            "tag_filters": tag_filters,
            "text_filter": text_filter,
        }
        kwargs.update({key: value for key, value in optional_fields.items() if value is not unset})
        super().__init__(kwargs)

        self_.data_source = data_source
        self_.metric = metric
        self_.name = name
class ProcessTimeseriesQuery(ModelNormal):
    @cached_property
    def openapi_types(_):
        from datadog_api_client.v2.model.process_data_source import ProcessDataSource

        return {
            "data_source": (ProcessDataSource,),
            "is_normalized_cpu": (bool,),
            "limit": (int,),
            "metric": (str,),
            "name": (str,),
            "sort": (str,),
            "tag_filters": ([str],),
            "text_filter": (str,),
        }

    # Every attribute keeps its own name on the wire.
    attribute_map = {
        key: key
        for key in (
            "data_source",
            "is_normalized_cpu",
            "limit",
            "metric",
            "name",
            "sort",
            "tag_filters",
            "text_filter",
        )
    }

    def __init__(
        self_,
        data_source: ProcessDataSource,
        metric: str,
        name: str,
        is_normalized_cpu: Union[bool, UnsetType] = unset,
        limit: Union[int, UnsetType] = unset,
        sort: Union[str, UnsetType] = unset,
        tag_filters: Union[List[str], UnsetType] = unset,
        text_filter: Union[str, UnsetType] = unset,
        **kwargs,
    ):
        """
        An individual timeseries process query.

        :param data_source: A data source for process-level infrastructure metrics.
        :type data_source: ProcessDataSource

        :param is_normalized_cpu: Whether CPU metrics should be normalized by core count.
        :type is_normalized_cpu: bool, optional

        :param limit: Maximum number of timeseries to return.
        :type limit: int, optional

        :param metric: The process metric to query.
        :type metric: str

        :param name: The variable name for use in formulas.
        :type name: str

        :param sort: Sort order for the results.
        :type sort: str, optional

        :param tag_filters: Tag filters to narrow down processes.
        :type tag_filters: [str], optional

        :param text_filter: A full-text search filter to match process names or commands.
        :type text_filter: str, optional
        """
        # Forward only the optional fields the caller actually provided.
        optional_fields = {
            "is_normalized_cpu": is_normalized_cpu,
            "limit": limit,
            "sort": sort,
            "tag_filters": tag_filters,
            "text_filter": text_filter,
        }
        kwargs.update({key: value for key, value in optional_fields.items() if value is not unset})
        super().__init__(kwargs)

        self_.data_source = data_source
        self_.metric = metric
        self_.name = name
+ :type operation_mode: str, optional + + :param peer_tags: Tags to query for a specific downstream entity (peer.service, peer.db_instance, peer.s3, peer.s3.bucket, etc.). + :type peer_tags: [str], optional + + :param query_filter: Additional filters for the query using metrics query syntax (e.g., env, primary_tag). + :type query_filter: str, optional + + :param resource_hash: The resource hash for exact matching. + :type resource_hash: str, optional + + :param span_kind: Describes the relationship between the span, its parents, and its children in a trace. Known values include consumer, server, client, producer, internal. + :type span_kind: str, optional + + :param is_upstream: Determines whether stats for upstream or downstream dependencies should be queried. + :type is_upstream: bool, optional + + :param additional_query_filters: Additional filters applied to the SLO query. + :type additional_query_filters: str, optional + + :param group_mode: How SLO results are grouped in the response. + :type group_mode: SlosGroupMode, optional + + :param measure: The SLO measurement to retrieve. + :type measure: SlosMeasure + + :param slo_id: The unique identifier of the SLO to query. + :type slo_id: str + + :param slo_query_type: The type of SLO definition being queried. + :type slo_query_type: SlosQueryType, optional + + :param is_normalized_cpu: Whether CPU metrics should be normalized by core count. + :type is_normalized_cpu: bool, optional + + :param limit: Maximum number of timeseries to return. + :type limit: int, optional + + :param metric: The process metric to query. + :type metric: str + + :param sort: Sort order for the results. + :type sort: str, optional + + :param tag_filters: Tag filters to narrow down processes. + :type tag_filters: [str], optional + + :param text_filter: A full-text search filter to match process names or commands. 
class SloDataSource(ModelSimple):
    """
    A data source for SLO queries.

    :param value: If omitted defaults to "slo". Must be one of ["slo"].
    :type value: str
    """

    # Single-valued enum: the only legal wire value.
    allowed_values = {"slo"}
    SLO: ClassVar["SloDataSource"]

    @cached_property
    def openapi_types(_):
        # The enum is serialized as a bare string.
        return {"value": (str,)}


SloDataSource.SLO = SloDataSource("slo")
"slo_query_type", + } + + def __init__( + self_, + data_source: SloDataSource, + measure: SlosMeasure, + slo_id: str, + additional_query_filters: Union[str, UnsetType] = unset, + group_mode: Union[SlosGroupMode, UnsetType] = unset, + name: Union[str, UnsetType] = unset, + slo_query_type: Union[SlosQueryType, UnsetType] = unset, + **kwargs, + ): + """ + An individual SLO query. + + :param additional_query_filters: Additional filters applied to the SLO query. + :type additional_query_filters: str, optional + + :param data_source: A data source for SLO queries. + :type data_source: SloDataSource + + :param group_mode: How SLO results are grouped in the response. + :type group_mode: SlosGroupMode, optional + + :param measure: The SLO measurement to retrieve. + :type measure: SlosMeasure + + :param name: The variable name for use in formulas. + :type name: str, optional + + :param slo_id: The unique identifier of the SLO to query. + :type slo_id: str + + :param slo_query_type: The type of SLO definition being queried. + :type slo_query_type: SlosQueryType, optional + """ + if additional_query_filters is not unset: + kwargs["additional_query_filters"] = additional_query_filters + if group_mode is not unset: + kwargs["group_mode"] = group_mode + if name is not unset: + kwargs["name"] = name + if slo_query_type is not unset: + kwargs["slo_query_type"] = slo_query_type + super().__init__(kwargs) + + self_.data_source = data_source + self_.measure = measure + self_.slo_id = slo_id diff --git a/src/datadog_api_client/v2/model/slos_group_mode.py b/src/datadog_api_client/v2/model/slos_group_mode.py new file mode 100644 index 0000000000..b9d20949d2 --- /dev/null +++ b/src/datadog_api_client/v2/model/slos_group_mode.py @@ -0,0 +1,38 @@ +# Unless explicitly stated otherwise all files in this repository are licensed under the Apache-2.0 License. +# This product includes software developed at Datadog (https://www.datadoghq.com/). +# Copyright 2019-Present Datadog, Inc. 
+from __future__ import annotations + + +from datadog_api_client.model_utils import ( + ModelSimple, + cached_property, +) + +from typing import ClassVar + + +class SlosGroupMode(ModelSimple): + """ + How SLO results are grouped in the response. + + :param value: Must be one of ["overall", "components"]. + :type value: str + """ + + allowed_values = { + "overall", + "components", + } + OVERALL: ClassVar["SlosGroupMode"] + COMPONENTS: ClassVar["SlosGroupMode"] + + @cached_property + def openapi_types(_): + return { + "value": (str,), + } + + +SlosGroupMode.OVERALL = SlosGroupMode("overall") +SlosGroupMode.COMPONENTS = SlosGroupMode("components") diff --git a/src/datadog_api_client/v2/model/slos_measure.py b/src/datadog_api_client/v2/model/slos_measure.py new file mode 100644 index 0000000000..3ad4118381 --- /dev/null +++ b/src/datadog_api_client/v2/model/slos_measure.py @@ -0,0 +1,62 @@ +# Unless explicitly stated otherwise all files in this repository are licensed under the Apache-2.0 License. +# This product includes software developed at Datadog (https://www.datadoghq.com/). +# Copyright 2019-Present Datadog, Inc. +from __future__ import annotations + + +from datadog_api_client.model_utils import ( + ModelSimple, + cached_property, +) + +from typing import ClassVar + + +class SlosMeasure(ModelSimple): + """ + The SLO measurement to retrieve. + + :param value: Must be one of ["good_events", "bad_events", "slo_status", "error_budget_remaining", "error_budget_remaining_history", "error_budget_burndown", "burn_rate", "slo_status_history", "good_minutes", "bad_minutes"]. 
+ :type value: str + """ + + allowed_values = { + "good_events", + "bad_events", + "slo_status", + "error_budget_remaining", + "error_budget_remaining_history", + "error_budget_burndown", + "burn_rate", + "slo_status_history", + "good_minutes", + "bad_minutes", + } + GOOD_EVENTS: ClassVar["SlosMeasure"] + BAD_EVENTS: ClassVar["SlosMeasure"] + SLO_STATUS: ClassVar["SlosMeasure"] + ERROR_BUDGET_REMAINING: ClassVar["SlosMeasure"] + ERROR_BUDGET_REMAINING_HISTORY: ClassVar["SlosMeasure"] + ERROR_BUDGET_BURNDOWN: ClassVar["SlosMeasure"] + BURN_RATE: ClassVar["SlosMeasure"] + SLO_STATUS_HISTORY: ClassVar["SlosMeasure"] + GOOD_MINUTES: ClassVar["SlosMeasure"] + BAD_MINUTES: ClassVar["SlosMeasure"] + + @cached_property + def openapi_types(_): + return { + "value": (str,), + } + + +SlosMeasure.GOOD_EVENTS = SlosMeasure("good_events") +SlosMeasure.BAD_EVENTS = SlosMeasure("bad_events") +SlosMeasure.SLO_STATUS = SlosMeasure("slo_status") +SlosMeasure.ERROR_BUDGET_REMAINING = SlosMeasure("error_budget_remaining") +SlosMeasure.ERROR_BUDGET_REMAINING_HISTORY = SlosMeasure("error_budget_remaining_history") +SlosMeasure.ERROR_BUDGET_BURNDOWN = SlosMeasure("error_budget_burndown") +SlosMeasure.BURN_RATE = SlosMeasure("burn_rate") +SlosMeasure.SLO_STATUS_HISTORY = SlosMeasure("slo_status_history") +SlosMeasure.GOOD_MINUTES = SlosMeasure("good_minutes") +SlosMeasure.BAD_MINUTES = SlosMeasure("bad_minutes") diff --git a/src/datadog_api_client/v2/model/slos_query_type.py b/src/datadog_api_client/v2/model/slos_query_type.py new file mode 100644 index 0000000000..02b747c797 --- /dev/null +++ b/src/datadog_api_client/v2/model/slos_query_type.py @@ -0,0 +1,41 @@ +# Unless explicitly stated otherwise all files in this repository are licensed under the Apache-2.0 License. +# This product includes software developed at Datadog (https://www.datadoghq.com/). +# Copyright 2019-Present Datadog, Inc. 
+from __future__ import annotations + + +from datadog_api_client.model_utils import ( + ModelSimple, + cached_property, +) + +from typing import ClassVar + + +class SlosQueryType(ModelSimple): + """ + The type of SLO definition being queried. + + :param value: Must be one of ["metric", "time_slice", "monitor"]. + :type value: str + """ + + allowed_values = { + "metric", + "time_slice", + "monitor", + } + METRIC: ClassVar["SlosQueryType"] + TIME_SLICE: ClassVar["SlosQueryType"] + MONITOR: ClassVar["SlosQueryType"] + + @cached_property + def openapi_types(_): + return { + "value": (str,), + } + + +SlosQueryType.METRIC = SlosQueryType("metric") +SlosQueryType.TIME_SLICE = SlosQueryType("time_slice") +SlosQueryType.MONITOR = SlosQueryType("monitor") diff --git a/src/datadog_api_client/v2/model/timeseries_query.py b/src/datadog_api_client/v2/model/timeseries_query.py index 4ffaab2191..1b33482fa8 100644 --- a/src/datadog_api_client/v2/model/timeseries_query.py +++ b/src/datadog_api_client/v2/model/timeseries_query.py @@ -35,6 +35,78 @@ def __init__(self, **kwargs): :param search: Configuration of the search/filter for an events query. :type search: EventsSearch, optional + + :param env: The environment to query. + :type env: str + + :param operation_name: The APM operation name. + :type operation_name: str, optional + + :param primary_tag_name: Name of the second primary tag used within APM. Required when `primary_tag_value` is specified. See https://docs.datadoghq.com/tracing/guide/setting_primary_tags_to_scope/#add-a-second-primary-tag-in-datadog + :type primary_tag_name: str, optional + + :param primary_tag_value: Value of the second primary tag by which to filter APM data. `primary_tag_name` must also be specified. + :type primary_tag_value: str, optional + + :param resource_name: The resource name to filter by. + :type resource_name: str, optional + + :param service: The service name to filter by. 
+ :type service: str + + :param stat: The APM resource statistic to query. + :type stat: ApmResourceStatName + + :param operation_mode: Optional operation mode to aggregate across operation names. + :type operation_mode: str, optional + + :param peer_tags: Tags to query for a specific downstream entity (peer.service, peer.db_instance, peer.s3, peer.s3.bucket, etc.). + :type peer_tags: [str], optional + + :param query_filter: Additional filters for the query using metrics query syntax (e.g., env, primary_tag). + :type query_filter: str, optional + + :param resource_hash: The resource hash for exact matching. + :type resource_hash: str, optional + + :param span_kind: Describes the relationship between the span, its parents, and its children in a trace. Known values include consumer, server, client, producer, internal. + :type span_kind: str, optional + + :param is_upstream: Determines whether stats for upstream or downstream dependencies should be queried. + :type is_upstream: bool, optional + + :param additional_query_filters: Additional filters applied to the SLO query. + :type additional_query_filters: str, optional + + :param group_mode: How SLO results are grouped in the response. + :type group_mode: SlosGroupMode, optional + + :param measure: The SLO measurement to retrieve. + :type measure: SlosMeasure + + :param slo_id: The unique identifier of the SLO to query. + :type slo_id: str + + :param slo_query_type: The type of SLO definition being queried. + :type slo_query_type: SlosQueryType, optional + + :param is_normalized_cpu: Whether CPU metrics should be normalized by core count. + :type is_normalized_cpu: bool, optional + + :param limit: Maximum number of timeseries to return. + :type limit: int, optional + + :param metric: The process metric to query. + :type metric: str + + :param sort: Sort order for the results. + :type sort: str, optional + + :param tag_filters: Tag filters to narrow down processes. 
+ :type tag_filters: [str], optional + + :param text_filter: A full-text search filter to match process names or commands. + :type text_filter: str, optional """ super().__init__(kwargs) @@ -49,10 +121,22 @@ def _composed_schemas(_): # loading from datadog_api_client.v2.model.metrics_timeseries_query import MetricsTimeseriesQuery from datadog_api_client.v2.model.events_timeseries_query import EventsTimeseriesQuery + from datadog_api_client.v2.model.apm_resource_stats_query import ApmResourceStatsQuery + from datadog_api_client.v2.model.apm_metrics_query import ApmMetricsQuery + from datadog_api_client.v2.model.apm_dependency_stats_query import ApmDependencyStatsQuery + from datadog_api_client.v2.model.slo_query import SloQuery + from datadog_api_client.v2.model.process_timeseries_query import ProcessTimeseriesQuery + from datadog_api_client.v2.model.container_timeseries_query import ContainerTimeseriesQuery return { "oneOf": [ MetricsTimeseriesQuery, EventsTimeseriesQuery, + ApmResourceStatsQuery, + ApmMetricsQuery, + ApmDependencyStatsQuery, + SloQuery, + ProcessTimeseriesQuery, + ContainerTimeseriesQuery, ], } diff --git a/src/datadog_api_client/v2/models/__init__.py b/src/datadog_api_client/v2/models/__init__.py index b3a7bf728c..711cc8a542 100644 --- a/src/datadog_api_client/v2/models/__init__.py +++ b/src/datadog_api_client/v2/models/__init__.py @@ -195,6 +195,15 @@ from datadog_api_client.v2.model.any_value import AnyValue from datadog_api_client.v2.model.any_value_item import AnyValueItem from datadog_api_client.v2.model.any_value_object import AnyValueObject +from datadog_api_client.v2.model.apm_dependency_stat_name import ApmDependencyStatName +from datadog_api_client.v2.model.apm_dependency_stats_data_source import ApmDependencyStatsDataSource +from datadog_api_client.v2.model.apm_dependency_stats_query import ApmDependencyStatsQuery +from datadog_api_client.v2.model.apm_metrics_data_source import ApmMetricsDataSource +from 
datadog_api_client.v2.model.apm_metrics_query import ApmMetricsQuery +from datadog_api_client.v2.model.apm_metrics_stat import ApmMetricsStat +from datadog_api_client.v2.model.apm_resource_stat_name import ApmResourceStatName +from datadog_api_client.v2.model.apm_resource_stats_data_source import ApmResourceStatsDataSource +from datadog_api_client.v2.model.apm_resource_stats_query import ApmResourceStatsQuery from datadog_api_client.v2.model.apm_retention_filter_type import ApmRetentionFilterType from datadog_api_client.v2.model.app_builder_event import AppBuilderEvent from datadog_api_client.v2.model.app_builder_event_name import AppBuilderEventName @@ -1078,6 +1087,7 @@ from datadog_api_client.v2.model.connections_response_meta import ConnectionsResponseMeta from datadog_api_client.v2.model.container import Container from datadog_api_client.v2.model.container_attributes import ContainerAttributes +from datadog_api_client.v2.model.container_data_source import ContainerDataSource from datadog_api_client.v2.model.container_group import ContainerGroup from datadog_api_client.v2.model.container_group_attributes import ContainerGroupAttributes from datadog_api_client.v2.model.container_group_relationships import ContainerGroupRelationships @@ -1107,6 +1117,8 @@ from datadog_api_client.v2.model.container_meta import ContainerMeta from datadog_api_client.v2.model.container_meta_page import ContainerMetaPage from datadog_api_client.v2.model.container_meta_page_type import ContainerMetaPageType +from datadog_api_client.v2.model.container_scalar_query import ContainerScalarQuery +from datadog_api_client.v2.model.container_timeseries_query import ContainerTimeseriesQuery from datadog_api_client.v2.model.container_type import ContainerType from datadog_api_client.v2.model.containers_response import ContainersResponse from datadog_api_client.v2.model.containers_response_links import ContainersResponseLinks @@ -4592,12 +4604,15 @@ from 
datadog_api_client.v2.model.powerpacks_response_meta import PowerpacksResponseMeta from datadog_api_client.v2.model.powerpacks_response_meta_pagination import PowerpacksResponseMetaPagination from datadog_api_client.v2.model.preview_entity_response_data import PreviewEntityResponseData +from datadog_api_client.v2.model.process_data_source import ProcessDataSource +from datadog_api_client.v2.model.process_scalar_query import ProcessScalarQuery from datadog_api_client.v2.model.process_summaries_meta import ProcessSummariesMeta from datadog_api_client.v2.model.process_summaries_meta_page import ProcessSummariesMetaPage from datadog_api_client.v2.model.process_summaries_response import ProcessSummariesResponse from datadog_api_client.v2.model.process_summary import ProcessSummary from datadog_api_client.v2.model.process_summary_attributes import ProcessSummaryAttributes from datadog_api_client.v2.model.process_summary_type import ProcessSummaryType +from datadog_api_client.v2.model.process_timeseries_query import ProcessTimeseriesQuery from datadog_api_client.v2.model.product_analytics_analytics_query import ProductAnalyticsAnalyticsQuery from datadog_api_client.v2.model.product_analytics_analytics_request import ProductAnalyticsAnalyticsRequest from datadog_api_client.v2.model.product_analytics_analytics_request_attributes import ( @@ -5861,6 +5876,8 @@ from datadog_api_client.v2.model.slack_integration_metadata import SlackIntegrationMetadata from datadog_api_client.v2.model.slack_integration_metadata_channel_item import SlackIntegrationMetadataChannelItem from datadog_api_client.v2.model.slack_trigger_wrapper import SlackTriggerWrapper +from datadog_api_client.v2.model.slo_data_source import SloDataSource +from datadog_api_client.v2.model.slo_query import SloQuery from datadog_api_client.v2.model.slo_report_create_request import SloReportCreateRequest from datadog_api_client.v2.model.slo_report_create_request_attributes import SloReportCreateRequestAttributes from 
datadog_api_client.v2.model.slo_report_create_request_data import SloReportCreateRequestData @@ -5868,6 +5885,9 @@ from datadog_api_client.v2.model.slo_status_data_attributes import SloStatusDataAttributes from datadog_api_client.v2.model.slo_status_response import SloStatusResponse from datadog_api_client.v2.model.slo_status_type import SloStatusType +from datadog_api_client.v2.model.slos_group_mode import SlosGroupMode +from datadog_api_client.v2.model.slos_measure import SlosMeasure +from datadog_api_client.v2.model.slos_query_type import SlosQueryType from datadog_api_client.v2.model.snapshot import Snapshot from datadog_api_client.v2.model.snapshot_array import SnapshotArray from datadog_api_client.v2.model.snapshot_create_request import SnapshotCreateRequest @@ -7014,6 +7034,15 @@ "AnyValue", "AnyValueItem", "AnyValueObject", + "ApmDependencyStatName", + "ApmDependencyStatsDataSource", + "ApmDependencyStatsQuery", + "ApmMetricsDataSource", + "ApmMetricsQuery", + "ApmMetricsStat", + "ApmResourceStatName", + "ApmResourceStatsDataSource", + "ApmResourceStatsQuery", "ApmRetentionFilterType", "AppBuilderEvent", "AppBuilderEventName", @@ -7653,6 +7682,7 @@ "ConnectionsResponseMeta", "Container", "ContainerAttributes", + "ContainerDataSource", "ContainerGroup", "ContainerGroupAttributes", "ContainerGroupRelationships", @@ -7680,6 +7710,8 @@ "ContainerMeta", "ContainerMetaPage", "ContainerMetaPageType", + "ContainerScalarQuery", + "ContainerTimeseriesQuery", "ContainerType", "ContainersResponse", "ContainersResponseLinks", @@ -10045,12 +10077,15 @@ "PowerpacksResponseMeta", "PowerpacksResponseMetaPagination", "PreviewEntityResponseData", + "ProcessDataSource", + "ProcessScalarQuery", "ProcessSummariesMeta", "ProcessSummariesMetaPage", "ProcessSummariesResponse", "ProcessSummary", "ProcessSummaryAttributes", "ProcessSummaryType", + "ProcessTimeseriesQuery", "ProductAnalyticsAnalyticsQuery", "ProductAnalyticsAnalyticsRequest", 
"ProductAnalyticsAnalyticsRequestAttributes", @@ -10944,6 +10979,8 @@ "SlackIntegrationMetadata", "SlackIntegrationMetadataChannelItem", "SlackTriggerWrapper", + "SloDataSource", + "SloQuery", "SloReportCreateRequest", "SloReportCreateRequestAttributes", "SloReportCreateRequestData", @@ -10951,6 +10988,9 @@ "SloStatusDataAttributes", "SloStatusResponse", "SloStatusType", + "SlosGroupMode", + "SlosMeasure", + "SlosQueryType", "Snapshot", "SnapshotArray", "SnapshotCreateRequest", diff --git a/tests/v2/cassettes/test_scenarios/test_scalar_cross_product_query_with_apm_dependency_stats_data_source_returns_ok_response.frozen b/tests/v2/cassettes/test_scenarios/test_scalar_cross_product_query_with_apm_dependency_stats_data_source_returns_ok_response.frozen new file mode 100644 index 0000000000..a290b6c8ad --- /dev/null +++ b/tests/v2/cassettes/test_scenarios/test_scalar_cross_product_query_with_apm_dependency_stats_data_source_returns_ok_response.frozen @@ -0,0 +1 @@ +2026-04-08T15:25:29.994Z \ No newline at end of file diff --git a/tests/v2/cassettes/test_scenarios/test_scalar_cross_product_query_with_apm_dependency_stats_data_source_returns_ok_response.yaml b/tests/v2/cassettes/test_scenarios/test_scalar_cross_product_query_with_apm_dependency_stats_data_source_returns_ok_response.yaml new file mode 100644 index 0000000000..9a936323ee --- /dev/null +++ b/tests/v2/cassettes/test_scenarios/test_scalar_cross_product_query_with_apm_dependency_stats_data_source_returns_ok_response.yaml @@ -0,0 +1,25 @@ +interactions: +- request: + body: '{"data":{"attributes":{"formulas":[{"formula":"a","limit":{"count":10,"order":"desc"}}],"from":1775658329000,"queries":[{"data_source":"apm_dependency_stats","env":"ci","name":"a","operation_name":"cassandra.query","primary_tag_name":"datacenter","primary_tag_value":"edge-eu1.prod.dog","resource_name":"DELETE + FROM monitor_history.monitor_state_change_history WHERE org_id = ? AND monitor_id + IN ? 
AND group = ?","service":"cassandra","stat":"avg_duration"}],"to":1775661929000},"type":"scalar_request"}}' + headers: + accept: + - application/json + content-type: + - application/json + method: POST + uri: https://api.datadoghq.com/api/v2/query/scalar + response: + body: + string: "{\"data\":{\"id\":\"0\",\"type\":\"scalar_response\",\"attributes\"\ + :{\"columns\":[{\"name\":\"a\",\"values\":[],\"type\":\"number\",\"meta\"\ + :{\"unit\":[{\"family\":\"time\",\"name\":\"microsecond\",\"plural\":\"microseconds\"\ + ,\"scale_factor\":0.000001,\"short_name\":\"\u03BCs\",\"id\":9},null]}}]}}}" + headers: + content-type: + - application/vnd.api+json + status: + code: 200 + message: OK +version: 1 diff --git a/tests/v2/cassettes/test_scenarios/test_scalar_cross_product_query_with_apm_metrics_data_source_returns_ok_response.frozen b/tests/v2/cassettes/test_scenarios/test_scalar_cross_product_query_with_apm_metrics_data_source_returns_ok_response.frozen new file mode 100644 index 0000000000..936f633689 --- /dev/null +++ b/tests/v2/cassettes/test_scenarios/test_scalar_cross_product_query_with_apm_metrics_data_source_returns_ok_response.frozen @@ -0,0 +1 @@ +2026-04-08T15:25:31.055Z \ No newline at end of file diff --git a/tests/v2/cassettes/test_scenarios/test_scalar_cross_product_query_with_apm_metrics_data_source_returns_ok_response.yaml b/tests/v2/cassettes/test_scenarios/test_scalar_cross_product_query_with_apm_metrics_data_source_returns_ok_response.yaml new file mode 100644 index 0000000000..5d8f76d712 --- /dev/null +++ b/tests/v2/cassettes/test_scenarios/test_scalar_cross_product_query_with_apm_metrics_data_source_returns_ok_response.yaml @@ -0,0 +1,20 @@ +interactions: +- request: + body: 
'{"data":{"attributes":{"formulas":[{"formula":"a","limit":{"count":10,"order":"desc"}}],"from":1775658331000,"queries":[{"data_source":"apm_metrics","group_by":["resource_name"],"name":"a","query_filter":"env:prod","service":"web-store","stat":"hits"}],"to":1775661931000},"type":"scalar_request"}}' + headers: + accept: + - application/json + content-type: + - application/json + method: POST + uri: https://api.datadoghq.com/api/v2/query/scalar + response: + body: + string: '{"data":{"id":"0","type":"scalar_response","attributes":{"columns":[{"name":"a","values":[],"type":"number","meta":{"unit":null}}]}}}' + headers: + content-type: + - application/vnd.api+json + status: + code: 200 + message: OK +version: 1 diff --git a/tests/v2/cassettes/test_scenarios/test_scalar_cross_product_query_with_apm_resource_stats_data_source_returns_ok_response.frozen b/tests/v2/cassettes/test_scenarios/test_scalar_cross_product_query_with_apm_resource_stats_data_source_returns_ok_response.frozen new file mode 100644 index 0000000000..35a011fa28 --- /dev/null +++ b/tests/v2/cassettes/test_scenarios/test_scalar_cross_product_query_with_apm_resource_stats_data_source_returns_ok_response.frozen @@ -0,0 +1 @@ +2026-04-08T15:25:31.243Z \ No newline at end of file diff --git a/tests/v2/cassettes/test_scenarios/test_scalar_cross_product_query_with_apm_resource_stats_data_source_returns_ok_response.yaml b/tests/v2/cassettes/test_scenarios/test_scalar_cross_product_query_with_apm_resource_stats_data_source_returns_ok_response.yaml new file mode 100644 index 0000000000..3c5ab50bfc --- /dev/null +++ b/tests/v2/cassettes/test_scenarios/test_scalar_cross_product_query_with_apm_resource_stats_data_source_returns_ok_response.yaml @@ -0,0 +1,20 @@ +interactions: +- request: + body: 
'{"data":{"attributes":{"formulas":[{"formula":"a","limit":{"count":10,"order":"desc"}}],"from":1775658331000,"queries":[{"data_source":"apm_resource_stats","env":"staging","group_by":["resource_name"],"name":"a","operation_name":"cassandra.query","primary_tag_name":"datacenter","primary_tag_value":"*","service":"azure-bill-import","stat":"hits"}],"to":1775661931000},"type":"scalar_request"}}' + headers: + accept: + - application/json + content-type: + - application/json + method: POST + uri: https://api.datadoghq.com/api/v2/query/scalar + response: + body: + string: '{"data":{"id":"0","type":"scalar_response","attributes":{"columns":[{"name":"a","values":[],"type":"number","meta":{"unit":null}}]}}}' + headers: + content-type: + - application/vnd.api+json + status: + code: 200 + message: OK +version: 1 diff --git a/tests/v2/cassettes/test_scenarios/test_scalar_cross_product_query_with_container_data_source_returns_ok_response.frozen b/tests/v2/cassettes/test_scenarios/test_scalar_cross_product_query_with_container_data_source_returns_ok_response.frozen new file mode 100644 index 0000000000..5a73cccc28 --- /dev/null +++ b/tests/v2/cassettes/test_scenarios/test_scalar_cross_product_query_with_container_data_source_returns_ok_response.frozen @@ -0,0 +1 @@ +2026-04-08T15:25:31.666Z \ No newline at end of file diff --git a/tests/v2/cassettes/test_scenarios/test_scalar_cross_product_query_with_container_data_source_returns_ok_response.yaml b/tests/v2/cassettes/test_scenarios/test_scalar_cross_product_query_with_container_data_source_returns_ok_response.yaml new file mode 100644 index 0000000000..48c2eaefc2 --- /dev/null +++ b/tests/v2/cassettes/test_scenarios/test_scalar_cross_product_query_with_container_data_source_returns_ok_response.yaml @@ -0,0 +1,20 @@ +interactions: +- request: + body: 
'{"data":{"attributes":{"formulas":[{"formula":"a","limit":{"count":10,"order":"desc"}}],"from":1775658331000,"queries":[{"aggregator":"avg","data_source":"container","limit":10,"metric":"process.stat.container.cpu.system_pct","name":"a","sort":"desc","tag_filters":[]}],"to":1775661931000},"type":"scalar_request"}}' + headers: + accept: + - application/json + content-type: + - application/json + method: POST + uri: https://api.datadoghq.com/api/v2/query/scalar + response: + body: + string: '{"data":{"id":"0","type":"scalar_response","attributes":{"columns":[{"name":"a","values":[],"type":"number","meta":{"unit":[{"family":"percentage","name":"percent","plural":"percent","scale_factor":1.0,"short_name":"%","id":17},null]}}]}}}' + headers: + content-type: + - application/vnd.api+json + status: + code: 200 + message: OK +version: 1 diff --git a/tests/v2/cassettes/test_scenarios/test_scalar_cross_product_query_with_process_data_source_returns_ok_response.frozen b/tests/v2/cassettes/test_scenarios/test_scalar_cross_product_query_with_process_data_source_returns_ok_response.frozen new file mode 100644 index 0000000000..aeb3bb6c4b --- /dev/null +++ b/tests/v2/cassettes/test_scenarios/test_scalar_cross_product_query_with_process_data_source_returns_ok_response.frozen @@ -0,0 +1 @@ +2026-04-08T15:25:31.838Z \ No newline at end of file diff --git a/tests/v2/cassettes/test_scenarios/test_scalar_cross_product_query_with_process_data_source_returns_ok_response.yaml b/tests/v2/cassettes/test_scenarios/test_scalar_cross_product_query_with_process_data_source_returns_ok_response.yaml new file mode 100644 index 0000000000..723b150c8f --- /dev/null +++ b/tests/v2/cassettes/test_scenarios/test_scalar_cross_product_query_with_process_data_source_returns_ok_response.yaml @@ -0,0 +1,20 @@ +interactions: +- request: + body: 
'{"data":{"attributes":{"formulas":[{"formula":"a","limit":{"count":10,"order":"desc"}}],"from":1775658331000,"queries":[{"aggregator":"avg","data_source":"process","is_normalized_cpu":false,"limit":10,"metric":"process.stat.cpu.total_pct","name":"a","sort":"desc","tag_filters":[],"text_filter":""}],"to":1775661931000},"type":"scalar_request"}}' + headers: + accept: + - application/json + content-type: + - application/json + method: POST + uri: https://api.datadoghq.com/api/v2/query/scalar + response: + body: + string: '{"data":{"id":"0","type":"scalar_response","attributes":{"columns":[{"name":"a","values":[],"type":"number","meta":{"unit":[{"family":"percentage","name":"percent","plural":"percent","scale_factor":1.0,"short_name":"%","id":17},null]}}]}}}' + headers: + content-type: + - application/vnd.api+json + status: + code: 200 + message: OK +version: 1 diff --git a/tests/v2/cassettes/test_scenarios/test_scalar_cross_product_query_with_slo_data_source_returns_ok_response.frozen b/tests/v2/cassettes/test_scenarios/test_scalar_cross_product_query_with_slo_data_source_returns_ok_response.frozen new file mode 100644 index 0000000000..1271a7e2cd --- /dev/null +++ b/tests/v2/cassettes/test_scenarios/test_scalar_cross_product_query_with_slo_data_source_returns_ok_response.frozen @@ -0,0 +1 @@ +2026-04-08T15:25:32.018Z \ No newline at end of file diff --git a/tests/v2/cassettes/test_scenarios/test_scalar_cross_product_query_with_slo_data_source_returns_ok_response.yaml b/tests/v2/cassettes/test_scenarios/test_scalar_cross_product_query_with_slo_data_source_returns_ok_response.yaml new file mode 100644 index 0000000000..3a3dbe8126 --- /dev/null +++ b/tests/v2/cassettes/test_scenarios/test_scalar_cross_product_query_with_slo_data_source_returns_ok_response.yaml @@ -0,0 +1,20 @@ +interactions: +- request: + body: 
'{"data":{"attributes":{"formulas":[{"formula":"a","limit":{"count":10,"order":"desc"}}],"from":1775658332000,"queries":[{"additional_query_filters":"*","data_source":"slo","group_mode":"overall","measure":"slo_status","name":"a","slo_id":"12345678910","slo_query_type":"metric"}],"to":1775661932000},"type":"scalar_request"}}' + headers: + accept: + - application/json + content-type: + - application/json + method: POST + uri: https://api.datadoghq.com/api/v2/query/scalar + response: + body: + string: '{"data":{"id":"0","type":"scalar_response","attributes":{"columns":[{"name":"a","values":[],"type":"number","meta":{"unit":[{"family":"percentage","name":"percent","plural":"percent","scale_factor":1.0,"short_name":"%","id":17},null]}}]}}}' + headers: + content-type: + - application/vnd.api+json + status: + code: 200 + message: OK +version: 1 diff --git a/tests/v2/cassettes/test_scenarios/test_timeseries_cross_product_query_with_apm_dependency_stats_data_source_returns_ok_response.frozen b/tests/v2/cassettes/test_scenarios/test_timeseries_cross_product_query_with_apm_dependency_stats_data_source_returns_ok_response.frozen new file mode 100644 index 0000000000..8d85e07f10 --- /dev/null +++ b/tests/v2/cassettes/test_scenarios/test_timeseries_cross_product_query_with_apm_dependency_stats_data_source_returns_ok_response.frozen @@ -0,0 +1 @@ +2026-04-08T15:25:32.147Z \ No newline at end of file diff --git a/tests/v2/cassettes/test_scenarios/test_timeseries_cross_product_query_with_apm_dependency_stats_data_source_returns_ok_response.yaml b/tests/v2/cassettes/test_scenarios/test_timeseries_cross_product_query_with_apm_dependency_stats_data_source_returns_ok_response.yaml new file mode 100644 index 0000000000..63802e9378 --- /dev/null +++ b/tests/v2/cassettes/test_scenarios/test_timeseries_cross_product_query_with_apm_dependency_stats_data_source_returns_ok_response.yaml @@ -0,0 +1,24 @@ +interactions: +- request: + body: 
'{"data":{"attributes":{"formulas":[{"formula":"a","limit":{"count":10,"order":"desc"}}],"from":1775658332000,"interval":5000,"queries":[{"data_source":"apm_dependency_stats","env":"ci","name":"a","operation_name":"cassandra.query","primary_tag_name":"datacenter","primary_tag_value":"edge-eu1.prod.dog","resource_name":"DELETE + FROM monitor_history.monitor_state_change_history WHERE org_id = ? AND monitor_id + IN ? AND group = ?","service":"cassandra","stat":"avg_duration"}],"to":1775661932000},"type":"timeseries_request"}}' + headers: + accept: + - application/json + content-type: + - application/json + method: POST + uri: https://api.datadoghq.com/api/v2/query/timeseries + response: + body: + string: '{"data":{"id":"0","type":"timeseries_response","attributes":{"series":[],"times":[],"values":[]}}} + + ' + headers: + content-type: + - application/vnd.api+json + status: + code: 200 + message: OK +version: 1 diff --git a/tests/v2/cassettes/test_scenarios/test_timeseries_cross_product_query_with_apm_metrics_data_source_returns_ok_response.frozen b/tests/v2/cassettes/test_scenarios/test_timeseries_cross_product_query_with_apm_metrics_data_source_returns_ok_response.frozen new file mode 100644 index 0000000000..316785c3cf --- /dev/null +++ b/tests/v2/cassettes/test_scenarios/test_timeseries_cross_product_query_with_apm_metrics_data_source_returns_ok_response.frozen @@ -0,0 +1 @@ +2026-04-08T15:25:32.363Z \ No newline at end of file diff --git a/tests/v2/cassettes/test_scenarios/test_timeseries_cross_product_query_with_apm_metrics_data_source_returns_ok_response.yaml b/tests/v2/cassettes/test_scenarios/test_timeseries_cross_product_query_with_apm_metrics_data_source_returns_ok_response.yaml new file mode 100644 index 0000000000..afa117bc1b --- /dev/null +++ b/tests/v2/cassettes/test_scenarios/test_timeseries_cross_product_query_with_apm_metrics_data_source_returns_ok_response.yaml @@ -0,0 +1,22 @@ +interactions: +- request: + body: 
'{"data":{"attributes":{"formulas":[{"formula":"a","limit":{"count":10,"order":"desc"}}],"from":1775658332000,"interval":5000,"queries":[{"data_source":"apm_metrics","group_by":["resource_name"],"name":"a","query_filter":"env:prod","service":"web-store","stat":"hits"}],"to":1775661932000},"type":"timeseries_request"}}' + headers: + accept: + - application/json + content-type: + - application/json + method: POST + uri: https://api.datadoghq.com/api/v2/query/timeseries + response: + body: + string: '{"data":{"id":"0","type":"timeseries_response","attributes":{"series":[],"times":[],"values":[]}}} + + ' + headers: + content-type: + - application/vnd.api+json + status: + code: 200 + message: OK +version: 1 diff --git a/tests/v2/cassettes/test_scenarios/test_timeseries_cross_product_query_with_apm_resource_stats_data_source_returns_ok_response.frozen b/tests/v2/cassettes/test_scenarios/test_timeseries_cross_product_query_with_apm_resource_stats_data_source_returns_ok_response.frozen new file mode 100644 index 0000000000..c4bed5aa5b --- /dev/null +++ b/tests/v2/cassettes/test_scenarios/test_timeseries_cross_product_query_with_apm_resource_stats_data_source_returns_ok_response.frozen @@ -0,0 +1 @@ +2026-04-08T15:25:32.570Z \ No newline at end of file diff --git a/tests/v2/cassettes/test_scenarios/test_timeseries_cross_product_query_with_apm_resource_stats_data_source_returns_ok_response.yaml b/tests/v2/cassettes/test_scenarios/test_timeseries_cross_product_query_with_apm_resource_stats_data_source_returns_ok_response.yaml new file mode 100644 index 0000000000..ba5864b377 --- /dev/null +++ b/tests/v2/cassettes/test_scenarios/test_timeseries_cross_product_query_with_apm_resource_stats_data_source_returns_ok_response.yaml @@ -0,0 +1,22 @@ +interactions: +- request: + body: 
'{"data":{"attributes":{"formulas":[{"formula":"a","limit":{"count":10,"order":"desc"}}],"from":1775658332000,"interval":5000,"queries":[{"data_source":"apm_resource_stats","env":"staging","group_by":["resource_name"],"name":"a","operation_name":"cassandra.query","primary_tag_name":"datacenter","primary_tag_value":"*","service":"azure-bill-import","stat":"hits"}],"to":1775661932000},"type":"timeseries_request"}}' + headers: + accept: + - application/json + content-type: + - application/json + method: POST + uri: https://api.datadoghq.com/api/v2/query/timeseries + response: + body: + string: '{"data":{"id":"0","type":"timeseries_response","attributes":{"series":[],"times":[],"values":[]}}} + + ' + headers: + content-type: + - application/vnd.api+json + status: + code: 200 + message: OK +version: 1 diff --git a/tests/v2/cassettes/test_scenarios/test_timeseries_cross_product_query_with_container_data_source_returns_ok_response.frozen b/tests/v2/cassettes/test_scenarios/test_timeseries_cross_product_query_with_container_data_source_returns_ok_response.frozen new file mode 100644 index 0000000000..0b3ec01894 --- /dev/null +++ b/tests/v2/cassettes/test_scenarios/test_timeseries_cross_product_query_with_container_data_source_returns_ok_response.frozen @@ -0,0 +1 @@ +2026-04-08T15:25:32.715Z \ No newline at end of file diff --git a/tests/v2/cassettes/test_scenarios/test_timeseries_cross_product_query_with_container_data_source_returns_ok_response.yaml b/tests/v2/cassettes/test_scenarios/test_timeseries_cross_product_query_with_container_data_source_returns_ok_response.yaml new file mode 100644 index 0000000000..5b12acecff --- /dev/null +++ b/tests/v2/cassettes/test_scenarios/test_timeseries_cross_product_query_with_container_data_source_returns_ok_response.yaml @@ -0,0 +1,22 @@ +interactions: +- request: + body: 
'{"data":{"attributes":{"formulas":[{"formula":"a","limit":{"count":10,"order":"desc"}}],"from":1775658332000,"interval":5000,"queries":[{"data_source":"container","limit":10,"metric":"process.stat.container.cpu.system_pct","name":"a","sort":"desc","tag_filters":[]}],"to":1775661932000},"type":"timeseries_request"}}' + headers: + accept: + - application/json + content-type: + - application/json + method: POST + uri: https://api.datadoghq.com/api/v2/query/timeseries + response: + body: + string: '{"data":{"id":"0","type":"timeseries_response","attributes":{"series":[],"times":[],"values":[]}}} + + ' + headers: + content-type: + - application/vnd.api+json + status: + code: 200 + message: OK +version: 1 diff --git a/tests/v2/cassettes/test_scenarios/test_timeseries_cross_product_query_with_process_data_source_returns_ok_response.frozen b/tests/v2/cassettes/test_scenarios/test_timeseries_cross_product_query_with_process_data_source_returns_ok_response.frozen new file mode 100644 index 0000000000..1ef87ffbe1 --- /dev/null +++ b/tests/v2/cassettes/test_scenarios/test_timeseries_cross_product_query_with_process_data_source_returns_ok_response.frozen @@ -0,0 +1 @@ +2026-04-08T15:25:32.912Z \ No newline at end of file diff --git a/tests/v2/cassettes/test_scenarios/test_timeseries_cross_product_query_with_process_data_source_returns_ok_response.yaml b/tests/v2/cassettes/test_scenarios/test_timeseries_cross_product_query_with_process_data_source_returns_ok_response.yaml new file mode 100644 index 0000000000..e90e6776fa --- /dev/null +++ b/tests/v2/cassettes/test_scenarios/test_timeseries_cross_product_query_with_process_data_source_returns_ok_response.yaml @@ -0,0 +1,22 @@ +interactions: +- request: + body: 
'{"data":{"attributes":{"formulas":[{"formula":"a","limit":{"count":10,"order":"desc"}}],"from":1775658332000,"interval":5000,"queries":[{"data_source":"process","is_normalized_cpu":false,"limit":10,"metric":"process.stat.cpu.total_pct","name":"a","sort":"desc","tag_filters":[],"text_filter":""}],"to":1775661932000},"type":"timeseries_request"}}' + headers: + accept: + - application/json + content-type: + - application/json + method: POST + uri: https://api.datadoghq.com/api/v2/query/timeseries + response: + body: + string: '{"data":{"id":"0","type":"timeseries_response","attributes":{"series":[],"times":[],"values":[]}}} + + ' + headers: + content-type: + - application/vnd.api+json + status: + code: 200 + message: OK +version: 1 diff --git a/tests/v2/cassettes/test_scenarios/test_timeseries_cross_product_query_with_slo_data_source_returns_ok_response.frozen b/tests/v2/cassettes/test_scenarios/test_timeseries_cross_product_query_with_slo_data_source_returns_ok_response.frozen new file mode 100644 index 0000000000..03ed54b79c --- /dev/null +++ b/tests/v2/cassettes/test_scenarios/test_timeseries_cross_product_query_with_slo_data_source_returns_ok_response.frozen @@ -0,0 +1 @@ +2026-04-08T15:25:33.123Z \ No newline at end of file diff --git a/tests/v2/cassettes/test_scenarios/test_timeseries_cross_product_query_with_slo_data_source_returns_ok_response.yaml b/tests/v2/cassettes/test_scenarios/test_timeseries_cross_product_query_with_slo_data_source_returns_ok_response.yaml new file mode 100644 index 0000000000..232ae9b5f7 --- /dev/null +++ b/tests/v2/cassettes/test_scenarios/test_timeseries_cross_product_query_with_slo_data_source_returns_ok_response.yaml @@ -0,0 +1,22 @@ +interactions: +- request: + body: 
'{"data":{"attributes":{"formulas":[{"formula":"a","limit":{"count":10,"order":"desc"}}],"from":1775658333000,"interval":5000,"queries":[{"additional_query_filters":"*","data_source":"slo","group_mode":"overall","measure":"slo_status","name":"a","slo_id":"12345678910","slo_query_type":"metric"}],"to":1775661933000},"type":"timeseries_request"}}' + headers: + accept: + - application/json + content-type: + - application/json + method: POST + uri: https://api.datadoghq.com/api/v2/query/timeseries + response: + body: + string: '{"data":{"id":"0","type":"timeseries_response","attributes":{"series":[],"times":[],"values":[]}}} + + ' + headers: + content-type: + - application/vnd.api+json + status: + code: 200 + message: OK +version: 1 diff --git a/tests/v2/features/metrics.feature b/tests/v2/features/metrics.feature index 41e753dfb3..c880fa0d23 100644 --- a/tests/v2/features/metrics.feature +++ b/tests/v2/features/metrics.feature @@ -344,6 +344,60 @@ Feature: Metrics And the response "data.type" is equal to "scalar_response" And the response "data.attributes.columns[0].name" is equal to "a" + @skip-validation @team:DataDog/timeseries-query + Scenario: Scalar cross product query with apm_dependency_stats data source returns "OK" response + Given a valid "appKeyAuth" key in the system + And new "QueryScalarData" request + And body with value {"data": {"attributes": {"formulas": [{"formula": "a", "limit": {"count": 10, "order": "desc"}}], "from": {{ timestamp('now - 1h') }}000, "queries": [{"data_source": "apm_dependency_stats", "name": "a", "env": "ci", "service": "cassandra", "stat": "avg_duration", "operation_name": "cassandra.query", "resource_name": "DELETE FROM monitor_history.monitor_state_change_history WHERE org_id = ? AND monitor_id IN ? 
AND group = ?", "primary_tag_name": "datacenter", "primary_tag_value": "edge-eu1.prod.dog"}], "to": {{ timestamp('now') }}000}, "type": "scalar_request"}} + When the request is sent + Then the response status is 200 OK + And the response "data.type" is equal to "scalar_response" + + @skip-validation @team:DataDog/timeseries-query + Scenario: Scalar cross product query with apm_metrics data source returns "OK" response + Given a valid "appKeyAuth" key in the system + And new "QueryScalarData" request + And body with value {"data": {"attributes": {"formulas": [{"formula": "a", "limit": {"count": 10, "order": "desc"}}], "from": {{ timestamp('now - 1h') }}000, "queries": [{"data_source": "apm_metrics", "name": "a", "stat": "hits", "service": "web-store", "query_filter": "env:prod", "group_by": ["resource_name"]}], "to": {{ timestamp('now') }}000}, "type": "scalar_request"}} + When the request is sent + Then the response status is 200 OK + And the response "data.type" is equal to "scalar_response" + + @skip-validation @team:DataDog/timeseries-query + Scenario: Scalar cross product query with apm_resource_stats data source returns "OK" response + Given a valid "appKeyAuth" key in the system + And new "QueryScalarData" request + And body with value {"data": {"attributes": {"formulas": [{"formula": "a", "limit": {"count": 10, "order": "desc"}}], "from": {{ timestamp('now - 1h') }}000, "queries": [{"data_source": "apm_resource_stats", "name": "a", "env": "staging", "service": "azure-bill-import", "stat": "hits", "operation_name": "cassandra.query", "group_by": ["resource_name"], "primary_tag_name": "datacenter", "primary_tag_value": "*"}], "to": {{ timestamp('now') }}000}, "type": "scalar_request"}} + When the request is sent + Then the response status is 200 OK + And the response "data.type" is equal to "scalar_response" + + @skip-validation @team:DataDog/timeseries-query + Scenario: Scalar cross product query with container data source returns "OK" response + Given a 
valid "appKeyAuth" key in the system + And new "QueryScalarData" request + And body with value {"data": {"attributes": {"formulas": [{"formula": "a", "limit": {"count": 10, "order": "desc"}}], "from": {{ timestamp('now - 1h') }}000, "queries": [{"data_source": "container", "name": "a", "metric": "process.stat.container.cpu.system_pct", "aggregator": "avg", "tag_filters": [], "limit": 10, "sort": "desc"}], "to": {{ timestamp('now') }}000}, "type": "scalar_request"}} + When the request is sent + Then the response status is 200 OK + And the response "data.type" is equal to "scalar_response" + + @skip-validation @team:DataDog/timeseries-query + Scenario: Scalar cross product query with process data source returns "OK" response + Given a valid "appKeyAuth" key in the system + And new "QueryScalarData" request + And body with value {"data": {"attributes": {"formulas": [{"formula": "a", "limit": {"count": 10, "order": "desc"}}], "from": {{ timestamp('now - 1h') }}000, "queries": [{"data_source": "process", "name": "a", "metric": "process.stat.cpu.total_pct", "aggregator": "avg", "text_filter": "", "tag_filters": [], "limit": 10, "sort": "desc", "is_normalized_cpu": false}], "to": {{ timestamp('now') }}000}, "type": "scalar_request"}} + When the request is sent + Then the response status is 200 OK + And the response "data.type" is equal to "scalar_response" + + @skip-validation @team:DataDog/timeseries-query + Scenario: Scalar cross product query with slo data source returns "OK" response + Given a valid "appKeyAuth" key in the system + And new "QueryScalarData" request + And body with value {"data": {"attributes": {"formulas": [{"formula": "a", "limit": {"count": 10, "order": "desc"}}], "from": {{ timestamp('now - 1h') }}000, "queries": [{"data_source": "slo", "name": "a", "slo_id": "12345678910", "measure": "slo_status", "slo_query_type": "metric", "group_mode": "overall", "additional_query_filters": "*"}], "to": {{ timestamp('now') }}000}, "type": "scalar_request"}} + 
When the request is sent + Then the response status is 200 OK + And the response "data.type" is equal to "scalar_response" + @generated @skip @team:DataDog/metrics-intake Scenario: Submit metrics returns "Bad Request" response Given new "SubmitMetrics" request @@ -415,6 +469,60 @@ Feature: Metrics Then the response status is 200 OK And the response "data.type" is equal to "timeseries_response" + @skip-validation @team:DataDog/timeseries-query + Scenario: Timeseries cross product query with apm_dependency_stats data source returns "OK" response + Given a valid "appKeyAuth" key in the system + And new "QueryTimeseriesData" request + And body with value {"data": {"attributes": {"formulas": [{"formula": "a", "limit": {"count": 10, "order": "desc"}}], "from": {{ timestamp('now - 1h') }}000, "interval": 5000, "queries": [{"data_source": "apm_dependency_stats", "name": "a", "env": "ci", "service": "cassandra", "stat": "avg_duration", "operation_name": "cassandra.query", "resource_name": "DELETE FROM monitor_history.monitor_state_change_history WHERE org_id = ? AND monitor_id IN ? 
AND group = ?", "primary_tag_name": "datacenter", "primary_tag_value": "edge-eu1.prod.dog"}], "to": {{ timestamp('now') }}000}, "type": "timeseries_request"}} + When the request is sent + Then the response status is 200 OK + And the response "data.type" is equal to "timeseries_response" + + @skip-validation @team:DataDog/timeseries-query + Scenario: Timeseries cross product query with apm_metrics data source returns "OK" response + Given a valid "appKeyAuth" key in the system + And new "QueryTimeseriesData" request + And body with value {"data": {"attributes": {"formulas": [{"formula": "a", "limit": {"count": 10, "order": "desc"}}], "from": {{ timestamp('now - 1h') }}000, "interval": 5000, "queries": [{"data_source": "apm_metrics", "name": "a", "stat": "hits", "service": "web-store", "query_filter": "env:prod", "group_by": ["resource_name"]}], "to": {{ timestamp('now') }}000}, "type": "timeseries_request"}} + When the request is sent + Then the response status is 200 OK + And the response "data.type" is equal to "timeseries_response" + + @skip-validation @team:DataDog/timeseries-query + Scenario: Timeseries cross product query with apm_resource_stats data source returns "OK" response + Given a valid "appKeyAuth" key in the system + And new "QueryTimeseriesData" request + And body with value {"data": {"attributes": {"formulas": [{"formula": "a", "limit": {"count": 10, "order": "desc"}}], "from": {{ timestamp('now - 1h') }}000, "interval": 5000, "queries": [{"data_source": "apm_resource_stats", "name": "a", "env": "staging", "service": "azure-bill-import", "stat": "hits", "operation_name": "cassandra.query", "group_by": ["resource_name"], "primary_tag_name": "datacenter", "primary_tag_value": "*"}], "to": {{ timestamp('now') }}000}, "type": "timeseries_request"}} + When the request is sent + Then the response status is 200 OK + And the response "data.type" is equal to "timeseries_response" + + @skip-validation @team:DataDog/timeseries-query + Scenario: Timeseries 
cross product query with container data source returns "OK" response + Given a valid "appKeyAuth" key in the system + And new "QueryTimeseriesData" request + And body with value {"data": {"attributes": {"formulas": [{"formula": "a", "limit": {"count": 10, "order": "desc"}}], "from": {{ timestamp('now - 1h') }}000, "interval": 5000, "queries": [{"data_source": "container", "name": "a", "metric": "process.stat.container.cpu.system_pct", "tag_filters": [], "limit": 10, "sort": "desc"}], "to": {{ timestamp('now') }}000}, "type": "timeseries_request"}} + When the request is sent + Then the response status is 200 OK + And the response "data.type" is equal to "timeseries_response" + + @skip-validation @team:DataDog/timeseries-query + Scenario: Timeseries cross product query with process data source returns "OK" response + Given a valid "appKeyAuth" key in the system + And new "QueryTimeseriesData" request + And body with value {"data": {"attributes": {"formulas": [{"formula": "a", "limit": {"count": 10, "order": "desc"}}], "from": {{ timestamp('now - 1h') }}000, "interval": 5000, "queries": [{"data_source": "process", "name": "a", "metric": "process.stat.cpu.total_pct", "text_filter": "", "tag_filters": [], "limit": 10, "sort": "desc", "is_normalized_cpu": false}], "to": {{ timestamp('now') }}000}, "type": "timeseries_request"}} + When the request is sent + Then the response status is 200 OK + And the response "data.type" is equal to "timeseries_response" + + @skip-validation @team:DataDog/timeseries-query + Scenario: Timeseries cross product query with slo data source returns "OK" response + Given a valid "appKeyAuth" key in the system + And new "QueryTimeseriesData" request + And body with value {"data": {"attributes": {"formulas": [{"formula": "a", "limit": {"count": 10, "order": "desc"}}], "from": {{ timestamp('now - 1h') }}000, "interval": 5000, "queries": [{"data_source": "slo", "name": "a", "slo_id": "12345678910", "measure": "slo_status", "slo_query_type": 
"metric", "group_mode": "overall", "additional_query_filters": "*"}], "to": {{ timestamp('now') }}000}, "type": "timeseries_request"}} + When the request is sent + Then the response status is 200 OK + And the response "data.type" is equal to "timeseries_response" + @generated @skip @team:DataDog/metrics-experience Scenario: Update a tag configuration returns "Bad Request" response Given a valid "appKeyAuth" key in the system