Skip to content

Commit d0adad3

Browse files
Auto-generated API code
1 parent 2804b0a commit d0adad3

File tree

5 files changed

+289
-7
lines changed

5 files changed

+289
-7
lines changed

elasticsearch/_async/client/logstash.py

Lines changed: 3 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -141,7 +141,9 @@ async def put_pipeline(
141141
142142
`<https://www.elastic.co/docs/api/doc/elasticsearch/v9/operation/operation-logstash-put-pipeline>`_
143143
144-
:param id: An identifier for the pipeline.
144+
:param id: An identifier for the pipeline. Pipeline IDs must begin with a letter
145+
or underscore and contain only letters, numbers, underscores, and
146+
numbers.
145147
:param pipeline:
146148
"""
147149
if id in SKIP_IN_PATH:

elasticsearch/_sync/client/logstash.py

Lines changed: 3 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -141,7 +141,9 @@ def put_pipeline(
141141
142142
`<https://www.elastic.co/docs/api/doc/elasticsearch/v9/operation/operation-logstash-put-pipeline>`_
143143
144-
:param id: An identifier for the pipeline.
144+
:param id: An identifier for the pipeline. Pipeline IDs must begin with a letter
145+
or underscore and contain only letters, numbers, underscores, and
146+
numbers.
145147
:param pipeline:
146148
"""
147149
if id in SKIP_IN_PATH:

elasticsearch/dsl/aggs.py

Lines changed: 97 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -643,6 +643,54 @@ def __init__(
643643
)
644644

645645

646+
class CartesianBounds(Agg[_R]):
647+
"""
648+
A metric aggregation that computes the spatial bounding box containing
649+
all values for a Point or Shape field.
650+
651+
:arg field: The field on which to run the aggregation.
652+
:arg missing: The value to apply to documents that do not have a
653+
value. By default, documents without a value are ignored.
654+
:arg script:
655+
"""
656+
657+
name = "cartesian_bounds"
658+
659+
def __init__(
660+
self,
661+
*,
662+
field: Union[str, "InstrumentedField", "DefaultType"] = DEFAULT,
663+
missing: Union[str, int, float, bool, "DefaultType"] = DEFAULT,
664+
script: Union["types.Script", Dict[str, Any], "DefaultType"] = DEFAULT,
665+
**kwargs: Any,
666+
):
667+
super().__init__(field=field, missing=missing, script=script, **kwargs)
668+
669+
670+
class CartesianCentroid(Agg[_R]):
671+
"""
672+
A metric aggregation that computes the weighted centroid from all
673+
coordinate values for point and shape fields.
674+
675+
:arg field: The field on which to run the aggregation.
676+
:arg missing: The value to apply to documents that do not have a
677+
value. By default, documents without a value are ignored.
678+
:arg script:
679+
"""
680+
681+
name = "cartesian_centroid"
682+
683+
def __init__(
684+
self,
685+
*,
686+
field: Union[str, "InstrumentedField", "DefaultType"] = DEFAULT,
687+
missing: Union[str, int, float, bool, "DefaultType"] = DEFAULT,
688+
script: Union["types.Script", Dict[str, Any], "DefaultType"] = DEFAULT,
689+
**kwargs: Any,
690+
):
691+
super().__init__(field=field, missing=missing, script=script, **kwargs)
692+
693+
646694
class CategorizeText(Bucket[_R]):
647695
"""
648696
A multi-bucket aggregation that groups semi-structured text into
@@ -725,6 +773,43 @@ def __init__(
725773
)
726774

727775

776+
class ChangePoint(Pipeline[_R]):
777+
"""
778+
A sibling pipeline that detects spikes, dips, and change points in a
779+
metric. Given a distribution of values provided by the sibling multi-
780+
bucket aggregation, this aggregation indicates the bucket of any spike
781+
or dip and/or the bucket at which the largest change in the
781+
distribution of values occurred, if they are statistically significant. There
783+
must be at least 22 bucketed values. Fewer than 1,000 is preferred.
784+
785+
:arg format: `DecimalFormat` pattern for the output value. If
786+
specified, the formatted value is returned in the aggregation’s
787+
`value_as_string` property.
788+
:arg gap_policy: Policy to apply when gaps are found in the data.
789+
Defaults to `skip` if omitted.
790+
:arg buckets_path: Path to the buckets that contain one set of values
791+
to correlate.
792+
"""
793+
794+
name = "change_point"
795+
796+
def __init__(
797+
self,
798+
*,
799+
format: Union[str, "DefaultType"] = DEFAULT,
800+
gap_policy: Union[
801+
Literal["skip", "insert_zeros", "keep_values"], "DefaultType"
802+
] = DEFAULT,
803+
buckets_path: Union[
804+
str, Sequence[str], Mapping[str, str], "DefaultType"
805+
] = DEFAULT,
806+
**kwargs: Any,
807+
):
808+
super().__init__(
809+
format=format, gap_policy=gap_policy, buckets_path=buckets_path, **kwargs
810+
)
811+
812+
728813
class Children(Bucket[_R]):
729814
"""
730815
A single bucket aggregation that selects child documents that have the
@@ -2960,6 +3045,14 @@ class SignificantTerms(Bucket[_R]):
29603045
the foreground sample with a term divided by the number of
29613046
documents in the background with the term.
29623047
:arg script_heuristic: Customized score, implemented via a script.
3048+
:arg p_value: Significant terms heuristic that calculates the p-value
3049+
between the term existing in foreground and background sets. The
3050+
p-value is the probability of obtaining test results at least as
3051+
extreme as the results actually observed, under the assumption
3052+
that the null hypothesis is correct. The p-value is calculated
3053+
assuming that the foreground set and the background set are
3054+
independent Bernoulli trials (https://en.wikipedia.org/wiki/Bernoulli_trial), with
3055+
the null hypothesis that the probabilities are the same.
29633056
:arg shard_min_doc_count: Regulates the certainty a shard has if the
29643057
term should actually be added to the candidate list or not with
29653058
respect to the `min_doc_count`. Terms will only be considered if
@@ -3013,6 +3106,9 @@ def __init__(
30133106
script_heuristic: Union[
30143107
"types.ScriptedHeuristic", Dict[str, Any], "DefaultType"
30153108
] = DEFAULT,
3109+
p_value: Union[
3110+
"types.PValueHeuristic", Dict[str, Any], "DefaultType"
3111+
] = DEFAULT,
30163112
shard_min_doc_count: Union[int, "DefaultType"] = DEFAULT,
30173113
shard_size: Union[int, "DefaultType"] = DEFAULT,
30183114
size: Union[int, "DefaultType"] = DEFAULT,
@@ -3031,6 +3127,7 @@ def __init__(
30313127
mutual_information=mutual_information,
30323128
percentage=percentage,
30333129
script_heuristic=script_heuristic,
3130+
p_value=p_value,
30343131
shard_min_doc_count=shard_min_doc_count,
30353132
shard_size=shard_size,
30363133
size=size,

elasticsearch/dsl/response/__init__.py

Lines changed: 3 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -233,10 +233,13 @@ def search_after(self) -> "SearchBase[_R]":
233233
"types.SimpleValueAggregate",
234234
"types.DerivativeAggregate",
235235
"types.BucketMetricValueAggregate",
236+
"types.ChangePointAggregate",
236237
"types.StatsAggregate",
237238
"types.StatsBucketAggregate",
238239
"types.ExtendedStatsAggregate",
239240
"types.ExtendedStatsBucketAggregate",
241+
"types.CartesianBoundsAggregate",
242+
"types.CartesianCentroidAggregate",
240243
"types.GeoBoundsAggregate",
241244
"types.GeoCentroidAggregate",
242245
"types.HistogramAggregate",

0 commit comments

Comments
 (0)