diff --git a/docs/examples/120fcf9f55128d6a81d5e87a9c235bbd.asciidoc b/docs/examples/120fcf9f55128d6a81d5e87a9c235bbd.asciidoc
index dd2c9dc6b..2f9f59595 100644
--- a/docs/examples/120fcf9f55128d6a81d5e87a9c235bbd.asciidoc
+++ b/docs/examples/120fcf9f55128d6a81d5e87a9c235bbd.asciidoc
@@ -3,16 +3,17 @@

 [source, python]
 ----
-resp = client.inference.stream_inference(
-    task_type="chat_completion",
+resp = client.inference.chat_completion_unified(
     inference_id="openai-completion",
-    model="gpt-4o",
-    messages=[
-        {
-            "role": "user",
-            "content": "What is Elastic?"
-        }
-    ],
+    chat_completion_request={
+        "model": "gpt-4o",
+        "messages": [
+            {
+                "role": "user",
+                "content": "What is Elastic?"
+            }
+        ]
+    },
 )
 print(resp)
 ----
diff --git a/docs/examples/13ecdf99114098c76b050397d9c3d4e6.asciidoc b/docs/examples/13ecdf99114098c76b050397d9c3d4e6.asciidoc
index 442cbc631..4c4295aaf 100644
--- a/docs/examples/13ecdf99114098c76b050397d9c3d4e6.asciidoc
+++ b/docs/examples/13ecdf99114098c76b050397d9c3d4e6.asciidoc
@@ -3,8 +3,7 @@

 [source, python]
 ----
-resp = client.inference.inference(
-    task_type="sparse_embedding",
+resp = client.inference.sparse_embedding(
     inference_id="my-elser-model",
     input="The sky above the port was the color of television tuned to a dead channel.",
 )
diff --git a/docs/examples/45954b8aaedfed57012be8b6538b0a24.asciidoc b/docs/examples/45954b8aaedfed57012be8b6538b0a24.asciidoc
index cdff938a9..a6a288c6c 100644
--- a/docs/examples/45954b8aaedfed57012be8b6538b0a24.asciidoc
+++ b/docs/examples/45954b8aaedfed57012be8b6538b0a24.asciidoc
@@ -3,41 +3,42 @@

 [source, python]
 ----
-resp = client.inference.stream_inference(
-    task_type="chat_completion",
+resp = client.inference.chat_completion_unified(
     inference_id="openai-completion",
-    messages=[
-        {
-            "role": "user",
-            "content": [
-                {
-                    "type": "text",
-                    "text": "What's the price of a scarf?"
-                }
-            ]
-        }
-    ],
-    tools=[
-        {
-            "type": "function",
-            "function": {
-                "name": "get_current_price",
-                "description": "Get the current price of a item",
-                "parameters": {
-                    "type": "object",
-                    "properties": {
-                        "item": {
-                            "id": "123"
+    chat_completion_request={
+        "messages": [
+            {
+                "role": "user",
+                "content": [
+                    {
+                        "type": "text",
+                        "text": "What's the price of a scarf?"
+                    }
+                ]
+            }
+        ],
+        "tools": [
+            {
+                "type": "function",
+                "function": {
+                    "name": "get_current_price",
+                    "description": "Get the current price of a item",
+                    "parameters": {
+                        "type": "object",
+                        "properties": {
+                            "item": {
+                                "id": "123"
+                            }
                         }
                     }
                 }
             }
-        }
-    ],
-    tool_choice={
-        "type": "function",
-        "function": {
-            "name": "get_current_price"
+        ],
+        "tool_choice": {
+            "type": "function",
+            "function": {
+                "name": "get_current_price"
+            }
         }
     },
 )
diff --git a/docs/examples/4b91ad7c9b44e07db4a4e81390f19ad3.asciidoc b/docs/examples/4b91ad7c9b44e07db4a4e81390f19ad3.asciidoc
index b7583a76d..575393f08 100644
--- a/docs/examples/4b91ad7c9b44e07db4a4e81390f19ad3.asciidoc
+++ b/docs/examples/4b91ad7c9b44e07db4a4e81390f19ad3.asciidoc
@@ -3,8 +3,7 @@

 [source, python]
 ----
-resp = client.inference.stream_inference(
-    task_type="completion",
+resp = client.inference.stream_completion(
     inference_id="openai-completion",
     input="What is Elastic?",
 )
diff --git a/docs/examples/7429b16221fe741fd31b0584786dd0b0.asciidoc b/docs/examples/7429b16221fe741fd31b0584786dd0b0.asciidoc
index 9e552ae3c..06d02bd82 100644
--- a/docs/examples/7429b16221fe741fd31b0584786dd0b0.asciidoc
+++ b/docs/examples/7429b16221fe741fd31b0584786dd0b0.asciidoc
@@ -3,8 +3,7 @@

 [source, python]
 ----
-resp = client.inference.inference(
-    task_type="text_embedding",
+resp = client.inference.text_embedding(
     inference_id="my-cohere-endpoint",
     input="The sky above the port was the color of television tuned to a dead channel.",
     task_settings={
diff --git a/docs/examples/82bb6c61dab959f4446dc5ecab7ecbdf.asciidoc b/docs/examples/82bb6c61dab959f4446dc5ecab7ecbdf.asciidoc
index 8bbb6682c..b8574f3ff 100644
--- a/docs/examples/82bb6c61dab959f4446dc5ecab7ecbdf.asciidoc
+++ b/docs/examples/82bb6c61dab959f4446dc5ecab7ecbdf.asciidoc
@@ -3,30 +3,31 @@

 [source, python]
 ----
-resp = client.inference.stream_inference(
-    task_type="chat_completion",
+resp = client.inference.chat_completion_unified(
     inference_id="openai-completion",
-    messages=[
-        {
-            "role": "assistant",
-            "content": "Let's find out what the weather is",
-            "tool_calls": [
-                {
-                    "id": "call_KcAjWtAww20AihPHphUh46Gd",
-                    "type": "function",
-                    "function": {
-                        "name": "get_current_weather",
-                        "arguments": "{\"location\":\"Boston, MA\"}"
+    chat_completion_request={
+        "messages": [
+            {
+                "role": "assistant",
+                "content": "Let's find out what the weather is",
+                "tool_calls": [
+                    {
+                        "id": "call_KcAjWtAww20AihPHphUh46Gd",
+                        "type": "function",
+                        "function": {
+                            "name": "get_current_weather",
+                            "arguments": "{\"location\":\"Boston, MA\"}"
+                        }
                     }
-                }
-            ]
-        },
-        {
-            "role": "tool",
-            "content": "The weather is cold",
-            "tool_call_id": "call_KcAjWtAww20AihPHphUh46Gd"
-        }
-    ],
+                ]
+            },
+            {
+                "role": "tool",
+                "content": "The weather is cold",
+                "tool_call_id": "call_KcAjWtAww20AihPHphUh46Gd"
+            }
+        ]
+    },
 )
 print(resp)
 ----
diff --git a/docs/examples/b45a8c6fc746e9c90fd181e69a605fad.asciidoc b/docs/examples/b45a8c6fc746e9c90fd181e69a605fad.asciidoc
index fe563aefe..a22a77c39 100644
--- a/docs/examples/b45a8c6fc746e9c90fd181e69a605fad.asciidoc
+++ b/docs/examples/b45a8c6fc746e9c90fd181e69a605fad.asciidoc
@@ -3,8 +3,7 @@

 [source, python]
 ----
-resp = client.inference.inference(
-    task_type="completion",
+resp = client.inference.completion(
     inference_id="openai_chat_completions",
     input="What is Elastic?",
 )
diff --git a/docs/examples/f1b24217b1d9ba6ea5e4fa6e6f412022.asciidoc b/docs/examples/f1b24217b1d9ba6ea5e4fa6e6f412022.asciidoc
index a23aeb237..4f7e6b403 100644
--- a/docs/examples/f1b24217b1d9ba6ea5e4fa6e6f412022.asciidoc
+++ b/docs/examples/f1b24217b1d9ba6ea5e4fa6e6f412022.asciidoc
@@ -3,8 +3,7 @@

 [source, python]
 ----
-resp = client.inference.inference(
-    task_type="rerank",
+resp = client.inference.rerank(
     inference_id="cohere_rerank",
     input=[
         "luke",
diff --git a/docs/guide/release-notes.asciidoc b/docs/guide/release-notes.asciidoc
index d3c7867fb..f9559db86 100644
--- a/docs/guide/release-notes.asciidoc
+++ b/docs/guide/release-notes.asciidoc
@@ -1,6 +1,39 @@
 [[release-notes]]
 == Release notes

+=== 8.18.0 (2025-04-15)
+
+- Merge `Elasticsearch-DSL` package (https://github.com/elastic/elasticsearch-py/pull/2736[#2736])
+- Add Python DSL documentation (https://github.com/elastic/elasticsearch-py/pull/2761[#2761])
+- Autogenerate DSL field classes from schema (https://github.com/elastic/elasticsearch-py/pull/2780[#2780])
+- Document use of sub-clients (https://github.com/elastic/elasticsearch-py/pull/2798[#2798])
+- Improve DSL documentation examples with class-based queries and type hints (https://github.com/elastic/elasticsearch-py/pull/2857[#2857])
+- Document the use of `param()` in Python DSL methods (https://github.com/elastic/elasticsearch-py/pull/2861[#2861])
+- Fix `simulate` sub-client documentation (https://github.com/elastic/elasticsearch-py/pull/2749[#2749])
+- Update APIs
+  * Remove `wait_for_active_shards` from experimental Get field usage stats API
+  * Rename incorrect `access_token` to `token` in Logout of OpenID Connect API
+  * Add Reindex legacy backing indices APIs
+  * Add Create an index from a source index API
+  * Add `include_source_on_error` to Create, Index, Update and Bulk APIs
+  * Add Stop async ES|QL query API
+  * Add `timeout` to Resolve Cluster API
+  * Add `adaptive_allocations` body field to Start and Update a trained model deployment API
+  * Rename `index_template_subtitutions` to `index_template_substitutions` in Simulate data ingestion API
+  * Add `if_primary_term`, `if_seq_no`, `op_type`, `require_alias` and `require_data_stream` to Create API
+  * Add `max_concurrent_shard_requests` to Open point in time API
+  * Add `local` and `flat_settings` to Check index templates API
+  * Add `reopen` to Update index settings API
+  * Add `resource` to Reload search analyzer API
+  * Add `lazy` to Roll over to a new index API
+  * Add `cause` and `create` to Simulate index template APIs
+  * Add Elastic Inference Service (EIS) chat completion
+  * Add inference APIs: Alibaba Cloud AI Search, Amazon Bedrock, Anthropic, Azure AI Studio, Azure OpenAI, Cohere, Elastic Inference Service (EIS), Elasticsearch, ELSER, Google AI Studio, Google Vertex AI, Hugging Face, Jina AI, Mistral, OpenAI, and Voyage AI
+- Update DSL
+  * Add `ignore_malformed`, `script`, `on_script_error` and `time_series_dimension` to Boolean field
+  * Add `index` to GeoShape field
+  * Add `search_inference_id` to SemanticText field
+
 [discrete]
 [[rn-8-17-2]]
 === 8.17.2 (2025-03-04)
diff --git a/elasticsearch/_version.py b/elasticsearch/_version.py
index 00e2789aa..030a7ff29 100644
--- a/elasticsearch/_version.py
+++ b/elasticsearch/_version.py
@@ -15,4 +15,4 @@
 # specific language governing permissions and limitations
 # under the License.

-__versionstr__ = "8.17.2" +__versionstr__ = "8.18.0" diff --git a/utils/generate-docs-examples/package-lock.json b/utils/generate-docs-examples/package-lock.json index 7c255e572..26899312f 100644 --- a/utils/generate-docs-examples/package-lock.json +++ b/utils/generate-docs-examples/package-lock.json @@ -17,9 +17,9 @@ } }, "node_modules/@elastic/request-converter": { - "version": "8.18.0", - "resolved": "https://registry.npmjs.org/@elastic/request-converter/-/request-converter-8.18.0.tgz", - "integrity": "sha512-xEIB17voGulAfBThFqqtk8Osc+dNHiCqN9GW0Nf6PunNdvmAT5YvMb6u4NNI+NPAxNu90ak396g+ThjH9VRGIw==", + "version": "8.18.1", + "resolved": "https://registry.npmjs.org/@elastic/request-converter/-/request-converter-8.18.1.tgz", + "integrity": "sha512-c5Q0aIxfK0RfkHhqX3sMsMmBwo1iNJviJezRNDZ006JCASGE3peAXKlyGiFcgV5MCxW3X0KHUdz/AEOdCCMXig==", "license": "Apache-2.0", "dependencies": { "base64url": "^3.0.1",