From f555c5e6895febb645a9f6465582a81286e7fb8a Mon Sep 17 00:00:00 2001 From: hizixin <19810781+hizixin@users.noreply.github.com> Date: Wed, 25 Mar 2026 21:39:57 -0700 Subject: [PATCH 1/2] azure-search-documents 11.7.0 GA release for API version 2026-04-01 Co-authored-by: Copilot <223556219+Copilot@users.noreply.github.com> --- .../azure-search-documents/CHANGELOG.md | 167 +- .../azure-search-documents/_metadata.json | 4 +- .../apiview-properties.json | 64 +- sdk/search/azure-search-documents/assets.json | 2 +- .../azure/search/documents/_client.py | 8 +- .../azure/search/documents/_configuration.py | 10 +- .../documents/_operations/_operations.py | 255 +--- .../search/documents/_operations/_patch.py | 90 +- .../azure/search/documents/_patch.py | 6 +- .../search/documents/_utils/model_base.py | 21 +- .../azure/search/documents/_version.py | 2 +- .../azure/search/documents/aio/_client.py | 8 +- .../search/documents/aio/_configuration.py | 10 +- .../documents/aio/_operations/_operations.py | 160 +- .../documents/aio/_operations/_patch.py | 66 +- .../azure/search/documents/aio/_patch.py | 2 +- .../azure/search/documents/indexes/_client.py | 16 +- .../documents/indexes/_configuration.py | 20 +- .../indexes/_operations/_operations.py | 923 +++-------- .../documents/indexes/_operations/_patch.py | 115 -- .../documents/indexes/_utils/model_base.py | 21 +- .../search/documents/indexes/aio/_client.py | 16 +- .../documents/indexes/aio/_configuration.py | 20 +- .../indexes/aio/_operations/_operations.py | 540 ++----- .../indexes/aio/_operations/_patch.py | 113 -- .../documents/indexes/models/__init__.py | 52 +- .../search/documents/indexes/models/_enums.py | 206 +-- .../documents/indexes/models/_models.py | 1344 ++--------------- .../search/documents/indexes/models/_patch.py | 17 +- .../documents/knowledgebases/_client.py | 8 +- .../knowledgebases/_configuration.py | 10 +- .../knowledgebases/_operations/_operations.py | 38 +- .../knowledgebases/_utils/model_base.py | 21 +- 
.../documents/knowledgebases/aio/_client.py | 8 +- .../knowledgebases/aio/_configuration.py | 10 +- .../aio/_operations/_operations.py | 27 +- .../knowledgebases/models/__init__.py | 22 +- .../documents/knowledgebases/models/_enums.py | 25 - .../knowledgebases/models/_models.py | 549 +------ .../azure/search/documents/models/__init__.py | 36 - .../azure/search/documents/models/_enums.py | 221 --- .../azure/search/documents/models/_models.py | 518 +------ .../azure/search/documents/models/_patch.py | 3 +- .../azure-search-documents/pyproject.toml | 2 +- .../azure-search-documents/tests/conftest.py | 1 + .../tests/search_service_preparer.py | 7 +- .../test_knowledge_base_configuration_live.py | 47 +- ...knowledge_base_configuration_live_async.py | 46 +- .../tests/test_knowledge_base_live.py | 32 - .../tests/test_knowledge_base_live_async.py | 32 - ...knowledge_source_remote_sharepoint_live.py | 129 -- ...dge_source_remote_sharepoint_live_async.py | 130 -- .../tests/test_search_client.py | 8 - .../tests/test_search_client_async.py | 8 - .../tests/test_search_client_search_live.py | 40 - .../test_search_client_search_live_async.py | 37 - .../test_search_index_client_alias_live.py | 1 + ...st_search_index_client_alias_live_async.py | 1 + .../tests/test_search_index_client_live.py | 49 - .../test_search_index_client_live_async.py | 50 - .../test_search_index_client_skillset_live.py | 1 - ...search_index_client_skillset_live_async.py | 1 - .../azure-search-documents/tsp-location.yaml | 2 +- 63 files changed, 1087 insertions(+), 5311 deletions(-) delete mode 100644 sdk/search/azure-search-documents/tests/test_knowledge_source_remote_sharepoint_live.py delete mode 100644 sdk/search/azure-search-documents/tests/test_knowledge_source_remote_sharepoint_live_async.py diff --git a/sdk/search/azure-search-documents/CHANGELOG.md b/sdk/search/azure-search-documents/CHANGELOG.md index 7b2281ba9980..6789cd3ee054 100644 --- a/sdk/search/azure-search-documents/CHANGELOG.md +++ 
b/sdk/search/azure-search-documents/CHANGELOG.md @@ -1,15 +1,177 @@ # Release History -## 11.7.0b3 (Unreleased) +## 11.7.0 (2026-04-01) ### Features Added +- Knowledge Base support: + - Added `azure.search.documents.knowledgebases.KnowledgeBaseRetrievalClient` for knowledge retrieval operations. + - Added Knowledge Base and Knowledge Source management operations in `SearchIndexClient`. + - Added `azure.search.documents.indexes.models.KnowledgeBase` and related models. + - Added knowledge source types: `AzureBlobKnowledgeSource`, `WebKnowledgeSource`, `SearchIndexKnowledgeSource`, `IndexedOneLakeKnowledgeSource`. + - Added `KnowledgeRetrievalMinimalReasoningEffort` and `KnowledgeRetrievalReasoningEffort` models. + - Added `KnowledgeSourceSynchronizationError`, `KnowledgeSourceStatistics`, `KnowledgeSourceStatus` models. + +- Index and indexer enhancements: + - Added `SearchIndexerKnowledgeStore.identity` for managed identity support on knowledge store projections. + - Added `SearchIndexerDataSourceConnection.identity` for managed identity support on data source connections. + - Changed `SearchResourceEncryptionKey.key_vault_key_version` from required to optional, aligning with service behavior. + +- Markdown parsing mode: + - Added `BlobIndexerParsingMode.MARKDOWN` enum value for native Markdown file parsing in blob indexers. + - Added `IndexingParametersConfiguration.markdown_parsing_submode` (`oneToOne` or `oneToMany`) to control document splitting. + - Added `IndexingParametersConfiguration.markdown_header_depth` (`h1` through `h6`) to set header depth for sectioning. + +- New skills: + - Added `ChatCompletionSkill` and related models (`ChatCompletionResponseFormat`, `ChatCompletionSchema`, `ChatCompletionCommonModelParameters`). + - Added `ContentUnderstandingSkill` and related models. + +- Other new models and enums: + - Added `AIServices` model for AI service connections. + - Added `CompletedSynchronizationState` and `SynchronizationState` models. 
+ ### Breaking Changes -### Bugs Fixed +The following changes are due to the migration from AutoRest to TypeSpec code generation and affect all users: + +- `SentimentSkillVersion` and `EntityRecognitionSkillVersion` are removed. Only the latest skill versions are supported. +- Model `serialize` and `deserialize` methods are removed. Use `as_dict` and constructor instead. + +> The following changes do not impact the API of stable versions such as 11.6.0. +> Only code written against a beta version such as 11.7.0b2 may be affected. + +- Below models do not exist in this release + - `azure.search.documents.indexes.models.AIServicesVisionParameters` + - `azure.search.documents.indexes.models.AIServicesVisionVectorizer` + - `azure.search.documents.indexes.models.AzureMachineLearningSkill` + - `azure.search.documents.indexes.models.AzureOpenAITokenizerParameters` + - `azure.search.documents.indexes.models.IndexedSharePointContainerName` + - `azure.search.documents.indexes.models.IndexerCurrentState` + - `azure.search.documents.indexes.models.IndexerExecutionStatusDetail` + - `azure.search.documents.indexes.models.IndexerPermissionOption` + - `azure.search.documents.indexes.models.IndexerRuntime` + - `azure.search.documents.indexes.models.IndexingMode` + - `azure.search.documents.indexes.models.IndexStatisticsSummary` + - `azure.search.documents.indexes.models.KnowledgeRetrievalLowReasoningEffort` + - `azure.search.documents.indexes.models.KnowledgeRetrievalMediumReasoningEffort` + - `azure.search.documents.indexes.models.KnowledgeRetrievalOutputMode` + - `azure.search.documents.indexes.models.PermissionFilter` + - `azure.search.documents.indexes.models.SearchIndexerCache` + - `azure.search.documents.indexes.models.SearchIndexerKnowledgeStoreParameters` + - `azure.search.documents.indexes.models.SearchIndexPermissionFilterOption` + - `azure.search.documents.indexes.models.ServiceIndexersRuntime` + - `azure.search.documents.indexes.models.SplitSkillEncoderModelName` + - 
`azure.search.documents.indexes.models.SplitSkillUnit` + - `azure.search.documents.indexes.models.VisionVectorizeSkill` + - `azure.search.documents.knowledgebases.models.IndexedSharePointKnowledgeSourceParams` + - `azure.search.documents.knowledgebases.models.KnowledgeBaseIndexedSharePointReference` + - `azure.search.documents.knowledgebases.models.KnowledgeBaseModelAnswerSynthesisActivityRecord` + - `azure.search.documents.knowledgebases.models.KnowledgeBaseModelQueryPlanningActivityRecord` + - `azure.search.documents.knowledgebases.models.KnowledgeBaseRemoteSharePointReference` + - `azure.search.documents.knowledgebases.models.RemoteSharePointKnowledgeSourceParams` + - `azure.search.documents.models.HybridCountAndFacetMode` + - `azure.search.documents.models.HybridSearch` + - `azure.search.documents.models.QueryLanguage` + - `azure.search.documents.models.QueryResultDocumentInnerHit` + - `azure.search.documents.models.QueryResultDocumentRerankerInput` + - `azure.search.documents.models.QueryResultDocumentSemanticField` + - `azure.search.documents.models.QueryRewritesDebugInfo` + - `azure.search.documents.models.QueryRewritesType` + - `azure.search.documents.models.QueryRewritesValuesDebugInfo` + - `azure.search.documents.models.QuerySpellerType` + - `azure.search.documents.models.SearchScoreThreshold` + - `azure.search.documents.models.SemanticDebugInfo` + - `azure.search.documents.models.SemanticFieldState` + - `azure.search.documents.models.SemanticQueryRewritesResultType` + - `azure.search.documents.models.VectorSimilarityThreshold` + - `azure.search.documents.models.VectorThreshold` + - `azure.search.documents.models.VectorThresholdKind` + - SharePoint knowledge source types (`IndexedSharePointKnowledgeSource`, `RemoteSharePointKnowledgeSource` and related models including `IndexedSharePointKnowledgeSourceParameters`, `RemoteSharePointKnowledgeSourceParameters`, `SharePointSensitivityLabelInfo`) + +- Below properties do not exist in this release + - 
`azure.search.documents.indexes.models.ChatCompletionSkill.auth_resource_id` + - `azure.search.documents.indexes.models.ChatCompletionSkill.batch_size` + - `azure.search.documents.indexes.models.ChatCompletionSkill.degree_of_parallelism` + - `azure.search.documents.indexes.models.ChatCompletionSkill.http_headers` + - `azure.search.documents.indexes.models.ChatCompletionSkill.http_method` + - `azure.search.documents.indexes.models.ChatCompletionSkill.timeout` + - `azure.search.documents.indexes.models.IndexerExecutionResult.mode` + - `azure.search.documents.indexes.models.IndexerExecutionResult.status_detail` + - `azure.search.documents.indexes.models.KnowledgeBase.answer_instructions` + - `azure.search.documents.indexes.models.KnowledgeBase.output_mode` + - `azure.search.documents.indexes.models.KnowledgeBase.retrieval_instructions` + - `azure.search.documents.indexes.models.KnowledgeBase.retrieval_reasoning_effort` + - `azure.search.documents.indexes.models.SearchField.permission_filter` + - `azure.search.documents.indexes.models.SearchField.sensitivity_label` + - `azure.search.documents.indexes.models.SearchIndex.permission_filter_option` + - `azure.search.documents.indexes.models.SearchIndex.purview_enabled` + - `azure.search.documents.indexes.models.SearchIndexer.cache` + - `azure.search.documents.indexes.models.SearchIndexerDataSourceConnection.indexer_permission_options` + - `azure.search.documents.indexes.models.SearchIndexerDataSourceConnection.sub_type` + - `azure.search.documents.indexes.models.SearchIndexerDataUserAssignedIdentity.federated_identity_client_id` + - `azure.search.documents.indexes.models.SearchIndexerKnowledgeStore.parameters` + - `azure.search.documents.indexes.models.SearchIndexerStatus.current_state` + - `azure.search.documents.indexes.models.SearchIndexerStatus.runtime` + - `azure.search.documents.indexes.models.SearchServiceStatistics.indexers_runtime` + - `azure.search.documents.indexes.models.SemanticConfiguration.flighting_opt_in` 
+ - `azure.search.documents.indexes.models.SplitSkill.azure_open_ai_tokenizer_parameters` + - `azure.search.documents.indexes.models.SplitSkill.unit` + - `azure.search.documents.knowledgebases.models.AzureBlobKnowledgeSourceParams.always_query_source` + - `azure.search.documents.knowledgebases.models.IndexedOneLakeKnowledgeSourceParams.always_query_source` + - `azure.search.documents.knowledgebases.models.KnowledgeBaseRetrievalRequest.max_output_size` + - `azure.search.documents.knowledgebases.models.KnowledgeBaseRetrievalRequest.output_mode` + - `azure.search.documents.knowledgebases.models.KnowledgeBaseRetrievalRequest.messages` + - `azure.search.documents.knowledgebases.models.KnowledgeBaseRetrievalRequest.retrieval_reasoning_effort` + - `azure.search.documents.knowledgebases.models.KnowledgeSourceParams.always_query_source` + - `azure.search.documents.knowledgebases.models.WebKnowledgeSourceParams.always_query_source` + - `azure.search.documents.models.DebugInfo.query_rewrites` + - `azure.search.documents.models.DocumentDebugInfo.inner_hits` + - `azure.search.documents.models.DocumentDebugInfo.semantic` + - `azure.search.documents.models.FacetResult.avg` + - `azure.search.documents.models.FacetResult.cardinality` + - `azure.search.documents.models.FacetResult.facets` + - `azure.search.documents.models.FacetResult.max` + - `azure.search.documents.models.FacetResult.min` + - `azure.search.documents.models.FacetResult.sum` + - `azure.search.documents.models.SearchDocumentsResult.debug_info` + - `azure.search.documents.models.SearchDocumentsResult.semantic_query_rewrites_result_type` + - `azure.search.documents.models.VectorizableTextQuery.query_rewrites` + - `azure.search.documents.models.VectorQuery.filter_override` + - `azure.search.documents.models.VectorQuery.per_document_vector_limit` + - `azure.search.documents.models.VectorQuery.threshold` + +- Below parameters do not exist in this release + - `SearchClient.search.hybrid_search` + - 
`SearchClient.search.query_language` + - `SearchClient.search.query_rewrites` + - `SearchClient.search.semantic_fields` + - `SearchClient.search.speller` + - `SearchIndexerClient.create_or_update_data_source_connection.skip_indexer_reset_requirement_for_cache` + - `SearchIndexerClient.create_or_update_indexer.disable_cache_reprocessing_change_detection` + - `SearchIndexerClient.create_or_update_indexer.skip_indexer_reset_requirement_for_cache` + - `SearchIndexerClient.create_or_update_skillset.disable_cache_reprocessing_change_detection` + - `SearchIndexerClient.create_or_update_skillset.skip_indexer_reset_requirement_for_cache` + +- Below operations do not exist in this release + - `SearchIndexClient.list_index_stats_summary` + - `SearchIndexerClient.reset_documents` + - `SearchIndexerClient.reset_skills` + - `SearchIndexerClient.resync` + +- Removed enum values: `KnowledgeRetrievalReasoningEffortKind.{low, medium}` (only `minimal` remains). +- Removed GPT-4o/4.1 model names from `AzureOpenAIModelName`; added GPT-5.4-mini/nano. + +### Deprecated + +The following changes are due to the migration from AutoRest to TypeSpec code generation. The old API continues to work at runtime via backward-compatible aliases: + +- `SearchFieldDataType` enum values are now UPPER_CASE (e.g., `STRING` instead of `String`). PascalCase aliases (e.g., `SearchFieldDataType.String`) are preserved and continue to work at runtime. +- `SearchField` now uses `retrievable` (from the API) as its native property instead of `hidden`. A `hidden` property (the inverse of `retrievable`) is preserved for backward compatibility via getter/setter. ### Other Changes +- Updated default API version to `2026-04-01`. 
+ ## 11.7.0b2 (2025-11-13) ### Features Added @@ -911,7 +1073,6 @@ This version will be the last version to officially support Python 3.5, future v - Create_or_updates methods does not support partial updates #11800 - Renamed AnalyzeRequest to AnalyzeTextOptions #11800 - Renamed Batch methods #11800 - ## 1.0.0b3 (2020-05-04) diff --git a/sdk/search/azure-search-documents/_metadata.json b/sdk/search/azure-search-documents/_metadata.json index 26bd40e914cd..c934444e45c6 100644 --- a/sdk/search/azure-search-documents/_metadata.json +++ b/sdk/search/azure-search-documents/_metadata.json @@ -1,6 +1,6 @@ { - "apiVersion": "2025-11-01-preview", + "apiVersion": "2026-04-01", "apiVersions": { - "Search": "2025-11-01-preview" + "Search": "2026-04-01" } } \ No newline at end of file diff --git a/sdk/search/azure-search-documents/apiview-properties.json b/sdk/search/azure-search-documents/apiview-properties.json index 0ca75fd26700..1c6e1716daff 100644 --- a/sdk/search/azure-search-documents/apiview-properties.json +++ b/sdk/search/azure-search-documents/apiview-properties.json @@ -5,9 +5,6 @@ "azure.search.documents.indexes.models.CognitiveServicesAccount": "Search.CognitiveServicesAccount", "azure.search.documents.indexes.models.AIServicesAccountIdentity": "Search.AIServicesAccountIdentity", "azure.search.documents.indexes.models.AIServicesAccountKey": "Search.AIServicesAccountKey", - "azure.search.documents.indexes.models.AIServicesVisionParameters": "Search.AIServicesVisionParameters", - "azure.search.documents.indexes.models.VectorSearchVectorizer": "Search.VectorSearchVectorizer", - "azure.search.documents.indexes.models.AIServicesVisionVectorizer": "Search.AIServicesVisionVectorizer", "azure.search.documents.indexes.models.AnalyzedTokenInfo": "Search.AnalyzedTokenInfo", "azure.search.documents.indexes.models.AnalyzeResult": "Search.AnalyzeResult", "azure.search.documents.indexes.models.AnalyzeTextOptions": "Search.AnalyzeRequest", @@ -21,11 +18,10 @@ 
"azure.search.documents.knowledgebases.models.KnowledgeSourceParams": "Search.KnowledgeSourceParams", "azure.search.documents.knowledgebases.models.AzureBlobKnowledgeSourceParams": "Search.AzureBlobKnowledgeSourceParams", "azure.search.documents.indexes.models.AzureMachineLearningParameters": "Search.AMLParameters", - "azure.search.documents.indexes.models.SearchIndexerSkill": "Search.SearchIndexerSkill", - "azure.search.documents.indexes.models.AzureMachineLearningSkill": "Search.AzureMachineLearningSkill", + "azure.search.documents.indexes.models.VectorSearchVectorizer": "Search.VectorSearchVectorizer", "azure.search.documents.indexes.models.AzureMachineLearningVectorizer": "Search.AMLVectorizer", + "azure.search.documents.indexes.models.SearchIndexerSkill": "Search.SearchIndexerSkill", "azure.search.documents.indexes.models.AzureOpenAIEmbeddingSkill": "Search.AzureOpenAIEmbeddingSkill", - "azure.search.documents.indexes.models.AzureOpenAITokenizerParameters": "Search.AzureOpenAITokenizerParameters", "azure.search.documents.indexes.models.AzureOpenAIVectorizer": "Search.AzureOpenAIVectorizer", "azure.search.documents.indexes.models.AzureOpenAIVectorizerParameters": "Search.AzureOpenAIVectorizerParameters", "azure.search.documents.indexes.models.VectorSearchCompression": "Search.VectorSearchCompression", @@ -92,25 +88,18 @@ "azure.search.documents.indexes.models.HighWaterMarkChangeDetectionPolicy": "Search.HighWaterMarkChangeDetectionPolicy", "azure.search.documents.indexes.models.HnswAlgorithmConfiguration": "Search.HnswAlgorithmConfiguration", "azure.search.documents.indexes.models.HnswParameters": "Search.HnswParameters", - "azure.search.documents.models.HybridSearch": "Search.HybridSearch", "azure.search.documents.indexes.models.ImageAnalysisSkill": "Search.ImageAnalysisSkill", "azure.search.documents.models.IndexAction": "Search.IndexAction", "azure.search.documents.models.IndexDocumentsBatch": "Search.IndexBatch", 
"azure.search.documents.indexes.models.IndexedOneLakeKnowledgeSource": "Search.IndexedOneLakeKnowledgeSource", "azure.search.documents.indexes.models.IndexedOneLakeKnowledgeSourceParameters": "Search.IndexedOneLakeKnowledgeSourceParameters", "azure.search.documents.knowledgebases.models.IndexedOneLakeKnowledgeSourceParams": "Search.IndexedOneLakeKnowledgeSourceParams", - "azure.search.documents.indexes.models.IndexedSharePointKnowledgeSource": "Search.IndexedSharePointKnowledgeSource", - "azure.search.documents.indexes.models.IndexedSharePointKnowledgeSourceParameters": "Search.IndexedSharePointKnowledgeSourceParameters", - "azure.search.documents.knowledgebases.models.IndexedSharePointKnowledgeSourceParams": "Search.IndexedSharePointKnowledgeSourceParams", - "azure.search.documents.indexes.models.IndexerCurrentState": "Search.IndexerCurrentState", "azure.search.documents.indexes.models.IndexerExecutionResult": "Search.IndexerExecutionResult", "azure.search.documents.indexes.models.IndexerResyncBody": "Search.IndexerResyncBody", - "azure.search.documents.indexes.models.IndexerRuntime": "Search.IndexerRuntime", "azure.search.documents.indexes.models.IndexingParameters": "Search.IndexingParameters", "azure.search.documents.indexes.models.IndexingParametersConfiguration": "Search.IndexingParametersConfiguration", "azure.search.documents.models.IndexingResult": "Search.IndexingResult", "azure.search.documents.indexes.models.IndexingSchedule": "Search.IndexingSchedule", - "azure.search.documents.indexes.models.IndexStatisticsSummary": "Search.IndexStatisticsSummary", "azure.search.documents.indexes.models.InputFieldMappingEntry": "Search.InputFieldMappingEntry", "azure.search.documents.indexes.models.KeepTokenFilter": "Search.KeepTokenFilter", "azure.search.documents.indexes.models.KeyPhraseExtractionSkill": "Search.KeyPhraseExtractionSkill", @@ -128,22 +117,16 @@ "azure.search.documents.knowledgebases.models.KnowledgeBaseErrorDetail": "Search.KnowledgeBaseErrorDetail", 
"azure.search.documents.knowledgebases.models.KnowledgeBaseImageContent": "Search.KnowledgeBaseImageContent", "azure.search.documents.knowledgebases.models.KnowledgeBaseIndexedOneLakeReference": "Search.KnowledgeBaseIndexedOneLakeReference", - "azure.search.documents.knowledgebases.models.KnowledgeBaseIndexedSharePointReference": "Search.KnowledgeBaseIndexedSharePointReference", "azure.search.documents.knowledgebases.models.KnowledgeBaseMessage": "Search.KnowledgeBaseMessage", "azure.search.documents.knowledgebases.models.KnowledgeBaseMessageContent": "Search.KnowledgeBaseMessageContent", "azure.search.documents.knowledgebases.models.KnowledgeBaseMessageImageContent": "Search.KnowledgeBaseMessageImageContent", "azure.search.documents.knowledgebases.models.KnowledgeBaseMessageTextContent": "Search.KnowledgeBaseMessageTextContent", - "azure.search.documents.knowledgebases.models.KnowledgeBaseModelAnswerSynthesisActivityRecord": "Search.KnowledgeBaseModelAnswerSynthesisActivityRecord", - "azure.search.documents.knowledgebases.models.KnowledgeBaseModelQueryPlanningActivityRecord": "Search.KnowledgeBaseModelQueryPlanningActivityRecord", - "azure.search.documents.knowledgebases.models.KnowledgeBaseRemoteSharePointReference": "Search.KnowledgeBaseRemoteSharePointReference", "azure.search.documents.knowledgebases.models.KnowledgeBaseRetrievalRequest": "Search.KnowledgeBaseRetrievalRequest", "azure.search.documents.knowledgebases.models.KnowledgeBaseRetrievalResponse": "Search.KnowledgeBaseRetrievalResponse", "azure.search.documents.knowledgebases.models.KnowledgeBaseSearchIndexReference": "Search.KnowledgeBaseSearchIndexReference", "azure.search.documents.knowledgebases.models.KnowledgeBaseWebReference": "Search.KnowledgeBaseWebReference", "azure.search.documents.knowledgebases.models.KnowledgeRetrievalIntent": "Search.KnowledgeRetrievalIntent", "azure.search.documents.knowledgebases.models.KnowledgeRetrievalReasoningEffort": "Search.KnowledgeRetrievalReasoningEffort", - 
"azure.search.documents.knowledgebases.models.KnowledgeRetrievalLowReasoningEffort": "Search.KnowledgeRetrievalLowReasoningEffort", - "azure.search.documents.knowledgebases.models.KnowledgeRetrievalMediumReasoningEffort": "Search.KnowledgeRetrievalMediumReasoningEffort", "azure.search.documents.knowledgebases.models.KnowledgeRetrievalMinimalReasoningEffort": "Search.KnowledgeRetrievalMinimalReasoningEffort", "azure.search.documents.knowledgebases.models.KnowledgeRetrievalSemanticIntent": "Search.KnowledgeRetrievalSemanticIntent", "azure.search.documents.knowledgebases.models.KnowledgeSourceVectorizer": "Search.KnowledgeSourceVectorizer", @@ -152,6 +135,7 @@ "azure.search.documents.indexes.models.KnowledgeSourceReference": "Search.KnowledgeSourceReference", "azure.search.documents.knowledgebases.models.KnowledgeSourceStatistics": "Search.KnowledgeSourceStatistics", "azure.search.documents.knowledgebases.models.KnowledgeSourceStatus": "Search.KnowledgeSourceStatus", + "azure.search.documents.knowledgebases.models.KnowledgeSourceSynchronizationError": "Search.KnowledgeSourceSynchronizationError", "azure.search.documents.indexes.models.LanguageDetectionSkill": "Search.LanguageDetectionSkill", "azure.search.documents.indexes.models.LengthTokenFilter": "Search.LengthTokenFilter", "azure.search.documents.indexes.models.LimitTokenFilter": "Search.LimitTokenFilter", @@ -181,15 +165,7 @@ "azure.search.documents.indexes.models.PIIDetectionSkill": "Search.PIIDetectionSkill", "azure.search.documents.models.QueryAnswerResult": "Search.QueryAnswerResult", "azure.search.documents.models.QueryCaptionResult": "Search.QueryCaptionResult", - "azure.search.documents.models.QueryResultDocumentInnerHit": "Search.QueryResultDocumentInnerHit", - "azure.search.documents.models.QueryResultDocumentRerankerInput": "Search.QueryResultDocumentRerankerInput", - "azure.search.documents.models.QueryResultDocumentSemanticField": "Search.QueryResultDocumentSemanticField", 
"azure.search.documents.models.QueryResultDocumentSubscores": "Search.QueryResultDocumentSubscores", - "azure.search.documents.models.QueryRewritesDebugInfo": "Search.QueryRewritesDebugInfo", - "azure.search.documents.models.QueryRewritesValuesDebugInfo": "Search.QueryRewritesValuesDebugInfo", - "azure.search.documents.indexes.models.RemoteSharePointKnowledgeSource": "Search.RemoteSharePointKnowledgeSource", - "azure.search.documents.indexes.models.RemoteSharePointKnowledgeSourceParameters": "Search.RemoteSharePointKnowledgeSourceParameters", - "azure.search.documents.knowledgebases.models.RemoteSharePointKnowledgeSourceParams": "Search.RemoteSharePointKnowledgeSourceParams", "azure.search.documents.indexes.models.RescoringOptions": "Search.RescoringOptions", "azure.search.documents.indexes.models.ResourceCounter": "Search.ResourceCounter", "azure.search.documents.indexes.models.ScalarQuantizationCompression": "Search.ScalarQuantizationCompression", @@ -200,7 +176,6 @@ "azure.search.documents.indexes.models.SearchField": "Search.SearchField", "azure.search.documents.indexes.models.SearchIndex": "Search.SearchIndex", "azure.search.documents.indexes.models.SearchIndexer": "Search.SearchIndexer", - "azure.search.documents.indexes.models.SearchIndexerCache": "Search.SearchIndexerCache", "azure.search.documents.indexes.models.SearchIndexerDataContainer": "Search.SearchIndexerDataContainer", "azure.search.documents.indexes.models.SearchIndexerDataIdentity": "Search.SearchIndexerDataIdentity", "azure.search.documents.indexes.models.SearchIndexerDataNoneIdentity": "Search.SearchIndexerDataNoneIdentity", @@ -215,7 +190,6 @@ "azure.search.documents.indexes.models.SearchIndexerKnowledgeStoreBlobProjectionSelector": "Search.SearchIndexerKnowledgeStoreBlobProjectionSelector", "azure.search.documents.indexes.models.SearchIndexerKnowledgeStoreFileProjectionSelector": "Search.SearchIndexerKnowledgeStoreFileProjectionSelector", 
"azure.search.documents.indexes.models.SearchIndexerKnowledgeStoreObjectProjectionSelector": "Search.SearchIndexerKnowledgeStoreObjectProjectionSelector", - "azure.search.documents.indexes.models.SearchIndexerKnowledgeStoreParameters": "Search.SearchIndexerKnowledgeStoreParameters", "azure.search.documents.indexes.models.SearchIndexerKnowledgeStoreProjection": "Search.SearchIndexerKnowledgeStoreProjection", "azure.search.documents.indexes.models.SearchIndexerKnowledgeStoreTableProjectionSelector": "Search.SearchIndexerKnowledgeStoreTableProjectionSelector", "azure.search.documents.indexes.models.SearchIndexerLimits": "Search.SearchIndexerLimits", @@ -229,21 +203,16 @@ "azure.search.documents.models.SearchRequest": "Search.SearchRequest", "azure.search.documents.indexes.models.SearchResourceEncryptionKey": "Search.SearchResourceEncryptionKey", "azure.search.documents.models.SearchResult": "Search.SearchResult", - "azure.search.documents.models.VectorThreshold": "Search.VectorThreshold", - "azure.search.documents.models.SearchScoreThreshold": "Search.SearchScoreThreshold", "azure.search.documents.indexes.models.SearchServiceCounters": "Search.SearchServiceCounters", "azure.search.documents.indexes.models.SearchServiceLimits": "Search.SearchServiceLimits", "azure.search.documents.indexes.models.SearchServiceStatistics": "Search.SearchServiceStatistics", "azure.search.documents.indexes.models.SearchSuggester": "Search.SearchSuggester", "azure.search.documents.indexes.models.SemanticConfiguration": "Search.SemanticConfiguration", - "azure.search.documents.models.SemanticDebugInfo": "Search.SemanticDebugInfo", "azure.search.documents.indexes.models.SemanticField": "Search.SemanticField", "azure.search.documents.indexes.models.SemanticPrioritizedFields": "Search.SemanticPrioritizedFields", "azure.search.documents.indexes.models.SemanticSearch": "Search.SemanticSearch", "azure.search.documents.indexes.models.SentimentSkillV3": "Search.SentimentSkillV3", - 
"azure.search.documents.indexes.models.ServiceIndexersRuntime": "Search.ServiceIndexersRuntime", "azure.search.documents.indexes.models.ShaperSkill": "Search.ShaperSkill", - "azure.search.documents.knowledgebases.models.SharePointSensitivityLabelInfo": "Search.SharePointSensitivityLabelInfo", "azure.search.documents.indexes.models.ShingleTokenFilter": "Search.ShingleTokenFilter", "azure.search.documents.models.SingleVectorFieldResult": "Search.SingleVectorFieldResult", "azure.search.documents.indexes.models.SkillNames": "Search.SkillNames", @@ -275,8 +244,6 @@ "azure.search.documents.models.VectorsDebugInfo": "Search.VectorsDebugInfo", "azure.search.documents.indexes.models.VectorSearch": "Search.VectorSearch", "azure.search.documents.indexes.models.VectorSearchProfile": "Search.VectorSearchProfile", - "azure.search.documents.models.VectorSimilarityThreshold": "Search.VectorSimilarityThreshold", - "azure.search.documents.indexes.models.VisionVectorizeSkill": "Search.VisionVectorizeSkill", "azure.search.documents.indexes.models.WebApiHttpHeaders": "Search.WebApiHttpHeaders", "azure.search.documents.indexes.models.WebApiSkill": "Search.WebApiSkill", "azure.search.documents.indexes.models.WebApiVectorizer": "Search.WebApiVectorizer", @@ -291,24 +258,16 @@ "azure.search.documents.models.ScoringStatistics": "Search.ScoringStatistics", "azure.search.documents.models.QueryDebugMode": "Search.QueryDebugMode", "azure.search.documents.models.SearchMode": "Search.SearchMode", - "azure.search.documents.models.QueryLanguage": "Search.QueryLanguage", - "azure.search.documents.models.QuerySpellerType": "Search.QuerySpellerType", "azure.search.documents.models.SemanticErrorMode": "Search.SemanticErrorMode", "azure.search.documents.models.QueryAnswerType": "Search.QueryAnswerType", "azure.search.documents.models.QueryCaptionType": "Search.QueryCaptionType", - "azure.search.documents.models.QueryRewritesType": "Search.QueryRewritesType", - 
"azure.search.documents.models.VectorThresholdKind": "Search.VectorThresholdKind", "azure.search.documents.models.VectorQueryKind": "Search.VectorQueryKind", "azure.search.documents.models.VectorFilterMode": "Search.VectorFilterMode", - "azure.search.documents.models.HybridCountAndFacetMode": "Search.HybridCountAndFacetMode", - "azure.search.documents.models.SemanticFieldState": "Search.SemanticFieldState", "azure.search.documents.models.SemanticErrorReason": "Search.SemanticErrorReason", "azure.search.documents.models.SemanticSearchResultsType": "Search.SemanticSearchResultsType", - "azure.search.documents.models.SemanticQueryRewritesResultType": "Search.SemanticQueryRewritesResultType", "azure.search.documents.models.IndexActionType": "Search.IndexActionType", "azure.search.documents.models.AutocompleteMode": "Search.AutocompleteMode", "azure.search.documents.models.SearchFieldDataType": "Search.SearchFieldDataType", - "azure.search.documents.models.PermissionFilter": "Search.PermissionFilter", "azure.search.documents.models.LexicalAnalyzerName": "Search.LexicalAnalyzerName", "azure.search.documents.models.LexicalNormalizerName": "Search.LexicalNormalizerName", "azure.search.documents.models.VectorEncodingFormat": "Search.VectorEncodingFormat", @@ -336,18 +295,12 @@ "azure.search.documents.models.VectorSearchCompressionRescoreStorageMethod": "Search.VectorSearchCompressionRescoreStorageMethod", "azure.search.documents.models.VectorSearchCompressionKind": "Search.VectorSearchCompressionKind", "azure.search.documents.models.VectorSearchCompressionTarget": "Search.VectorSearchCompressionTarget", - "azure.search.documents.models.SearchIndexPermissionFilterOption": "Search.SearchIndexPermissionFilterOption", "azure.search.documents.models.KnowledgeBaseModelKind": "Search.KnowledgeBaseModelKind", - "azure.search.documents.models.KnowledgeRetrievalReasoningEffortKind": "Search.KnowledgeRetrievalReasoningEffortKind", - 
"azure.search.documents.models.KnowledgeRetrievalOutputMode": "Search.KnowledgeRetrievalOutputMode", "azure.search.documents.models.KnowledgeSourceKind": "Search.KnowledgeSourceKind", "azure.search.documents.models.KnowledgeSourceIngestionPermissionOption": "Search.KnowledgeSourceIngestionPermissionOption", "azure.search.documents.models.KnowledgeSourceContentExtractionMode": "Search.KnowledgeSourceContentExtractionMode", - "azure.search.documents.models.IndexedSharePointContainerName": "Search.IndexedSharePointContainerName", "azure.search.documents.models.KnowledgeSourceSynchronizationStatus": "Search.KnowledgeSourceSynchronizationStatus", "azure.search.documents.models.SearchIndexerDataSourceType": "Search.SearchIndexerDataSourceType", - "azure.search.documents.models.IndexerPermissionOption": "Search.IndexerPermissionOption", - "azure.search.documents.models.IndexerResyncOption": "Search.IndexerResyncOption", "azure.search.documents.models.BlobIndexerParsingMode": "Search.BlobIndexerParsingMode", "azure.search.documents.models.MarkdownParsingSubmode": "Search.MarkdownParsingSubmode", "azure.search.documents.models.MarkdownHeaderDepth": "Search.MarkdownHeaderDepth", @@ -357,8 +310,6 @@ "azure.search.documents.models.IndexerExecutionEnvironment": "Search.IndexerExecutionEnvironment", "azure.search.documents.models.IndexerStatus": "Search.IndexerStatus", "azure.search.documents.models.IndexerExecutionStatus": "Search.IndexerExecutionStatus", - "azure.search.documents.models.IndexerExecutionStatusDetail": "Search.IndexerExecutionStatusDetail", - "azure.search.documents.models.IndexingMode": "Search.IndexingMode", "azure.search.documents.models.ChatCompletionExtraParametersBehavior": "Search.ChatCompletionExtraParametersBehavior", "azure.search.documents.models.ChatCompletionResponseFormatType": "Search.ChatCompletionResponseFormatType", "azure.search.documents.models.CustomEntityLookupSkillLanguage": "Search.CustomEntityLookupSkillLanguage", @@ -366,9 +317,10 @@ 
"azure.search.documents.models.PIIDetectionSkillMaskingMode": "Search.PIIDetectionSkillMaskingMode", "azure.search.documents.models.SplitSkillLanguage": "Search.SplitSkillLanguage", "azure.search.documents.models.TextSplitMode": "Search.TextSplitMode", - "azure.search.documents.models.SplitSkillUnit": "Search.SplitSkillUnit", - "azure.search.documents.models.SplitSkillEncoderModelName": "Search.SplitSkillEncoderModelName", "azure.search.documents.models.TextTranslationSkillLanguage": "Search.TextTranslationSkillLanguage", + "azure.search.documents.models.EntityCategory": "Search.EntityCategory", + "azure.search.documents.models.EntityRecognitionSkillLanguage": "Search.EntityRecognitionSkillLanguage", + "azure.search.documents.models.SentimentSkillLanguage": "Search.SentimentSkillLanguage", "azure.search.documents.models.ContentUnderstandingSkillExtractionOptions": "Search.ContentUnderstandingSkillExtractionOptions", "azure.search.documents.models.ContentUnderstandingSkillChunkingUnit": "Search.ContentUnderstandingSkillChunkingUnit", "azure.search.documents.models.DocumentIntelligenceLayoutSkillOutputFormat": "Search.DocumentIntelligenceLayoutSkillOutputFormat", @@ -384,8 +336,10 @@ "azure.search.documents.models.IndexProjectionMode": "Search.IndexProjectionMode", "azure.search.documents.models.KnowledgeBaseMessageContentType": "Search.KnowledgeBaseMessageContentType", "azure.search.documents.models.KnowledgeBaseActivityRecordType": "Search.KnowledgeBaseActivityRecordType", + "azure.search.documents.models.KnowledgeRetrievalReasoningEffortKind": "Search.KnowledgeRetrievalReasoningEffortKind", "azure.search.documents.models.KnowledgeBaseReferenceType": "Search.KnowledgeBaseReferenceType", "azure.search.documents.models.KnowledgeRetrievalIntentType": "Search.KnowledgeRetrievalIntentType", + "azure.search.documents.models.IndexerResyncOption": "Search.IndexerResyncOption", "azure.search.documents.SearchClient.get_document_count": 
"Customizations.SearchClient.Documents.count", "azure.search.documents.aio.SearchClient.get_document_count": "Customizations.SearchClient.Documents.count", "azure.search.documents.SearchClient.get_document": "Customizations.SearchClient.Documents.get", @@ -420,8 +374,6 @@ "azure.search.documents.aio.SearchIndexClient.get_knowledge_source_status": "Customizations.SearchIndexClient.Sources.getStatus", "azure.search.documents.SearchIndexClient.get_service_statistics": "Customizations.SearchIndexClient.Root.getServiceStatistics", "azure.search.documents.aio.SearchIndexClient.get_service_statistics": "Customizations.SearchIndexClient.Root.getServiceStatistics", - "azure.search.documents.SearchIndexClient.list_index_stats_summary": "Customizations.SearchIndexClient.Root.getIndexStatsSummary", - "azure.search.documents.aio.SearchIndexClient.list_index_stats_summary": "Customizations.SearchIndexClient.Root.getIndexStatsSummary", "azure.search.documents.SearchIndexerClient.get_data_source_connection": "Customizations.SearchIndexerClient.DataSources.get", "azure.search.documents.aio.SearchIndexerClient.get_data_source_connection": "Customizations.SearchIndexerClient.DataSources.get", "azure.search.documents.SearchIndexerClient.create_data_source_connection": "Customizations.SearchIndexerClient.DataSources.create", diff --git a/sdk/search/azure-search-documents/assets.json b/sdk/search/azure-search-documents/assets.json index 2cf68dad6a3c..356d4ab14e05 100644 --- a/sdk/search/azure-search-documents/assets.json +++ b/sdk/search/azure-search-documents/assets.json @@ -2,5 +2,5 @@ "AssetsRepo": "Azure/azure-sdk-assets", "AssetsRepoPrefixPath": "python", "TagPrefix": "python/search/azure-search-documents", - "Tag": "python/search/azure-search-documents_18d40a0a11" + "Tag": "python/search/azure-search-documents_35a2c408d6" } diff --git a/sdk/search/azure-search-documents/azure/search/documents/_client.py b/sdk/search/azure-search-documents/azure/search/documents/_client.py index 
f96d7ea85bb4..99eabf66308d 100644 --- a/sdk/search/azure-search-documents/azure/search/documents/_client.py +++ b/sdk/search/azure-search-documents/azure/search/documents/_client.py @@ -26,7 +26,7 @@ class SearchClient(_SearchClientOperationsMixin): """SearchClient. - :param endpoint: Service host. Required. + :param endpoint: The endpoint URL of the search service. Required. :type endpoint: str :param credential: Credential used to authenticate requests to the service. Is either a key credential type or a token credential type. Required. @@ -34,9 +34,9 @@ class SearchClient(_SearchClientOperationsMixin): ~azure.core.credentials.TokenCredential :param index_name: The name of the index. Required. :type index_name: str - :keyword api_version: The API version to use for this operation. Known values are - "2025-11-01-preview" and None. Default value is "2025-11-01-preview". Note that overriding this - default value may result in unsupported behavior. + :keyword api_version: The API version to use for this operation. Known values are "2026-04-01" + and None. Default value is "2026-04-01". Note that overriding this default value may result in + unsupported behavior. :paramtype api_version: str """ diff --git a/sdk/search/azure-search-documents/azure/search/documents/_configuration.py b/sdk/search/azure-search-documents/azure/search/documents/_configuration.py index dfe36d2d3450..f73fc4c9954d 100644 --- a/sdk/search/azure-search-documents/azure/search/documents/_configuration.py +++ b/sdk/search/azure-search-documents/azure/search/documents/_configuration.py @@ -23,7 +23,7 @@ class SearchClientConfiguration: # pylint: disable=too-many-instance-attributes Note that all parameters used to create this instance are saved as instance attributes. - :param endpoint: Service host. Required. + :param endpoint: The endpoint URL of the search service. Required. :type endpoint: str :param credential: Credential used to authenticate requests to the service. 
Is either a key credential type or a token credential type. Required. @@ -31,16 +31,16 @@ class SearchClientConfiguration: # pylint: disable=too-many-instance-attributes ~azure.core.credentials.TokenCredential :param index_name: The name of the index. Required. :type index_name: str - :keyword api_version: The API version to use for this operation. Known values are - "2025-11-01-preview" and None. Default value is "2025-11-01-preview". Note that overriding this - default value may result in unsupported behavior. + :keyword api_version: The API version to use for this operation. Known values are "2026-04-01" + and None. Default value is "2026-04-01". Note that overriding this default value may result in + unsupported behavior. :paramtype api_version: str """ def __init__( self, endpoint: str, credential: Union[AzureKeyCredential, "TokenCredential"], index_name: str, **kwargs: Any ) -> None: - api_version: str = kwargs.pop("api_version", "2025-11-01-preview") + api_version: str = kwargs.pop("api_version", "2026-04-01") if endpoint is None: raise ValueError("Parameter 'endpoint' must not be None.") diff --git a/sdk/search/azure-search-documents/azure/search/documents/_operations/_operations.py b/sdk/search/azure-search-documents/azure/search/documents/_operations/_operations.py index b2a620ff97e0..a21db33544e6 100644 --- a/sdk/search/azure-search-documents/azure/search/documents/_operations/_operations.py +++ b/sdk/search/azure-search-documents/azure/search/documents/_operations/_operations.py @@ -6,7 +6,7 @@ # Code generated by Microsoft (R) Python Code Generator. # Changes may cause incorrect behavior and will be lost if the code is regenerated. 
# -------------------------------------------------------------------------- -from collections.abc import MutableMapping +from collections.abc import MutableMapping # pylint: disable=import-error from io import IOBase import json from typing import Any, Callable, IO, Optional, TypeVar, Union, overload @@ -46,7 +46,7 @@ def build_search_get_document_count_request(index_name: str, **kwargs: Any) -> H _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) - api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2025-11-01-preview")) + api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2026-04-01")) accept = _headers.pop("Accept", "application/json;odata.metadata=none") # Construct URL @@ -61,7 +61,8 @@ def build_search_get_document_count_request(index_name: str, **kwargs: Any) -> H _params["api-version"] = _SERIALIZER.query("api_version", api_version, "str") # Construct headers - _headers["Accept"] = _SERIALIZER.header("accept", accept, "str") + if accept is not None: + _headers["Accept"] = _SERIALIZER.header("accept", accept, "str") return HttpRequest(method="GET", url=_url, params=_params, headers=_headers, **kwargs) @@ -69,8 +70,6 @@ def build_search_get_document_count_request(index_name: str, **kwargs: Any) -> H def build_search_search_get_request( # pylint: disable=too-many-locals,too-many-statements,too-many-branches index_name: str, *, - query_source_authorization: Optional[str] = None, - enable_elevated_read: Optional[bool] = None, search_text: Optional[str] = None, include_total_result_count: Optional[bool] = None, facets: Optional[list[str]] = None, @@ -96,17 +95,13 @@ def build_search_search_get_request( # pylint: disable=too-many-locals,too-many answers: Optional[Union[str, _models1.QueryAnswerType]] = None, captions: Optional[Union[str, _models1.QueryCaptionType]] = None, semantic_query: Optional[str] = None, - query_rewrites: 
Optional[Union[str, _models1.QueryRewritesType]] = None, debug: Optional[Union[str, _models1.QueryDebugMode]] = None, - query_language: Optional[Union[str, _models1.QueryLanguage]] = None, - speller: Optional[Union[str, _models1.QuerySpellerType]] = None, - semantic_fields: Optional[list[str]] = None, **kwargs: Any ) -> HttpRequest: _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) - api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2025-11-01-preview")) + api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2026-04-01")) accept = _headers.pop("Accept", "application/json;odata.metadata=none") # Construct URL @@ -173,41 +168,22 @@ def build_search_search_get_request( # pylint: disable=too-many-locals,too-many _params["captions"] = _SERIALIZER.query("captions", captions, "str") if semantic_query is not None: _params["semanticQuery"] = _SERIALIZER.query("semantic_query", semantic_query, "str") - if query_rewrites is not None: - _params["queryRewrites"] = _SERIALIZER.query("query_rewrites", query_rewrites, "str") if debug is not None: _params["debug"] = _SERIALIZER.query("debug", debug, "str") - if query_language is not None: - _params["queryLanguage"] = _SERIALIZER.query("query_language", query_language, "str") - if speller is not None: - _params["speller"] = _SERIALIZER.query("speller", speller, "str") - if semantic_fields is not None: - _params["semanticFields"] = _SERIALIZER.query("semantic_fields", semantic_fields, "[str]", div=",") # Construct headers - _headers["Accept"] = _SERIALIZER.header("accept", accept, "str") - if query_source_authorization is not None: - _headers["x-ms-query-source-authorization"] = _SERIALIZER.header( - "query_source_authorization", query_source_authorization, "str" - ) - if enable_elevated_read is not None: - _headers["x-ms-enable-elevated-read"] = _SERIALIZER.header("enable_elevated_read", enable_elevated_read, 
"bool") + if accept is not None: + _headers["Accept"] = _SERIALIZER.header("accept", accept, "str") return HttpRequest(method="GET", url=_url, params=_params, headers=_headers, **kwargs) -def build_search_search_post_request( - index_name: str, - *, - query_source_authorization: Optional[str] = None, - enable_elevated_read: Optional[bool] = None, - **kwargs: Any -) -> HttpRequest: +def build_search_search_post_request(index_name: str, **kwargs: Any) -> HttpRequest: _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None)) - api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2025-11-01-preview")) + api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2026-04-01")) accept = _headers.pop("Accept", "application/json;odata.metadata=none") # Construct URL @@ -222,13 +198,8 @@ def build_search_search_post_request( _params["api-version"] = _SERIALIZER.query("api_version", api_version, "str") # Construct headers - _headers["Accept"] = _SERIALIZER.header("accept", accept, "str") - if query_source_authorization is not None: - _headers["x-ms-query-source-authorization"] = _SERIALIZER.header( - "query_source_authorization", query_source_authorization, "str" - ) - if enable_elevated_read is not None: - _headers["x-ms-enable-elevated-read"] = _SERIALIZER.header("enable_elevated_read", enable_elevated_read, "bool") + if accept is not None: + _headers["Accept"] = _SERIALIZER.header("accept", accept, "str") if content_type is not None: _headers["Content-Type"] = _SERIALIZER.header("content_type", content_type, "str") @@ -236,18 +207,12 @@ def build_search_search_post_request( def build_search_get_document_request( - key: str, - index_name: str, - *, - query_source_authorization: Optional[str] = None, - enable_elevated_read: Optional[bool] = None, - selected_fields: 
Optional[list[str]] = None, - **kwargs: Any + key: str, index_name: str, *, selected_fields: Optional[list[str]] = None, **kwargs: Any ) -> HttpRequest: _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) - api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2025-11-01-preview")) + api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2026-04-01")) accept = _headers.pop("Accept", "application/json;odata.metadata=none") # Construct URL @@ -265,13 +230,8 @@ def build_search_get_document_request( _params["$select"] = _SERIALIZER.query("selected_fields", selected_fields, "[str]", div=",") # Construct headers - _headers["Accept"] = _SERIALIZER.header("accept", accept, "str") - if query_source_authorization is not None: - _headers["x-ms-query-source-authorization"] = _SERIALIZER.header( - "query_source_authorization", query_source_authorization, "str" - ) - if enable_elevated_read is not None: - _headers["x-ms-enable-elevated-read"] = _SERIALIZER.header("enable_elevated_read", enable_elevated_read, "bool") + if accept is not None: + _headers["Accept"] = _SERIALIZER.header("accept", accept, "str") return HttpRequest(method="GET", url=_url, params=_params, headers=_headers, **kwargs) @@ -295,7 +255,7 @@ def build_search_suggest_get_request( _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) - api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2025-11-01-preview")) + api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2026-04-01")) accept = _headers.pop("Accept", "application/json;odata.metadata=none") # Construct URL @@ -330,7 +290,8 @@ def build_search_suggest_get_request( _params["$top"] = _SERIALIZER.query("top", top, "int") # Construct headers - _headers["Accept"] = _SERIALIZER.header("accept", accept, "str") + if accept is not None: 
+ _headers["Accept"] = _SERIALIZER.header("accept", accept, "str") return HttpRequest(method="GET", url=_url, params=_params, headers=_headers, **kwargs) @@ -340,7 +301,7 @@ def build_search_suggest_post_request(index_name: str, **kwargs: Any) -> HttpReq _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None)) - api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2025-11-01-preview")) + api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2026-04-01")) accept = _headers.pop("Accept", "application/json;odata.metadata=none") # Construct URL @@ -355,7 +316,8 @@ def build_search_suggest_post_request(index_name: str, **kwargs: Any) -> HttpReq _params["api-version"] = _SERIALIZER.query("api_version", api_version, "str") # Construct headers - _headers["Accept"] = _SERIALIZER.header("accept", accept, "str") + if accept is not None: + _headers["Accept"] = _SERIALIZER.header("accept", accept, "str") if content_type is not None: _headers["Content-Type"] = _SERIALIZER.header("content_type", content_type, "str") @@ -367,7 +329,7 @@ def build_search_index_request(index_name: str, **kwargs: Any) -> HttpRequest: _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None)) - api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2025-11-01-preview")) + api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2026-04-01")) accept = _headers.pop("Accept", "application/json;odata.metadata=none") # Construct URL @@ -382,7 +344,8 @@ def build_search_index_request(index_name: str, **kwargs: Any) -> HttpRequest: _params["api-version"] = _SERIALIZER.query("api_version", api_version, "str") # Construct headers - _headers["Accept"] = _SERIALIZER.header("accept", accept, "str") + if accept is not None: + _headers["Accept"] = 
_SERIALIZER.header("accept", accept, "str") if content_type is not None: _headers["Content-Type"] = _SERIALIZER.header("content_type", content_type, "str") @@ -394,7 +357,7 @@ def build_search_autocomplete_get_request( *, search_text: str, suggester_name: str, - autocomplete_mode: Optional[Union[str, _models1.AutocompleteMode]] = None, + autocomplete_mode: Optional[Union[str, _models1._enums.AutocompleteMode]] = None, filter: Optional[str] = None, use_fuzzy_matching: Optional[bool] = None, highlight_post_tag: Optional[str] = None, @@ -407,7 +370,7 @@ def build_search_autocomplete_get_request( _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) - api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2025-11-01-preview")) + api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2026-04-01")) accept = _headers.pop("Accept", "application/json;odata.metadata=none") # Construct URL @@ -440,7 +403,8 @@ def build_search_autocomplete_get_request( _params["$top"] = _SERIALIZER.query("top", top, "int") # Construct headers - _headers["Accept"] = _SERIALIZER.header("accept", accept, "str") + if accept is not None: + _headers["Accept"] = _SERIALIZER.header("accept", accept, "str") return HttpRequest(method="GET", url=_url, params=_params, headers=_headers, **kwargs) @@ -450,7 +414,7 @@ def build_search_autocomplete_post_request(index_name: str, **kwargs: Any) -> Ht _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None)) - api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2025-11-01-preview")) + api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2026-04-01")) accept = _headers.pop("Accept", "application/json;odata.metadata=none") # Construct URL @@ -465,7 +429,8 @@ def build_search_autocomplete_post_request(index_name: str, 
**kwargs: Any) -> Ht _params["api-version"] = _SERIALIZER.query("api_version", api_version, "str") # Construct headers - _headers["Accept"] = _SERIALIZER.header("accept", accept, "str") + if accept is not None: + _headers["Accept"] = _SERIALIZER.header("accept", accept, "str") if content_type is not None: _headers["Content-Type"] = _SERIALIZER.header("content_type", content_type, "str") @@ -508,6 +473,7 @@ def get_document_count(self, **kwargs: Any) -> int: } _request.url = self._client.format_url(_request.url, **path_format_arguments) + _decompress = kwargs.pop("decompress", True) _stream = kwargs.pop("stream", False) pipeline_response: PipelineResponse = self._client._pipeline.run( # pylint: disable=protected-access _request, stream=_stream, **kwargs @@ -532,7 +498,7 @@ def get_document_count(self, **kwargs: Any) -> int: response_headers["content-type"] = self._deserialize("str", response.headers.get("content-type")) if _stream: - deserialized = response.iter_bytes() + deserialized = response.iter_bytes() if _decompress else response.iter_raw() else: deserialized = _deserialize(int, response.text()) @@ -545,8 +511,6 @@ def get_document_count(self, **kwargs: Any) -> int: def _search_get( # pylint: disable=too-many-locals self, *, - query_source_authorization: Optional[str] = None, - enable_elevated_read: Optional[bool] = None, search_text: Optional[str] = None, include_total_result_count: Optional[bool] = None, facets: Optional[list[str]] = None, @@ -572,22 +536,11 @@ def _search_get( # pylint: disable=too-many-locals answers: Optional[Union[str, _models1.QueryAnswerType]] = None, captions: Optional[Union[str, _models1.QueryCaptionType]] = None, semantic_query: Optional[str] = None, - query_rewrites: Optional[Union[str, _models1.QueryRewritesType]] = None, debug: Optional[Union[str, _models1.QueryDebugMode]] = None, - query_language: Optional[Union[str, _models1.QueryLanguage]] = None, - speller: Optional[Union[str, _models1.QuerySpellerType]] = None, - 
semantic_fields: Optional[list[str]] = None, **kwargs: Any ) -> _models1.SearchDocumentsResult: """Searches for documents in the index. - :keyword query_source_authorization: Token identifying the user for which the query is being - executed. This token is used to enforce security restrictions on documents. Default value is - None. - :paramtype query_source_authorization: str - :keyword enable_elevated_read: A value that enables elevated read that bypass document level - permission checks for the query operation. Default value is None. - :paramtype enable_elevated_read: bool :keyword search_text: A full-text search query expression; Use "*" or omit this parameter to match all documents. Default value is None. :paramtype search_text: str @@ -705,33 +658,10 @@ def _search_get( # pylint: disable=too-many-locals is a need to use different queries between the base retrieval and ranking phase, and the L2 semantic phase. Default value is None. :paramtype semantic_query: str - :keyword query_rewrites: When QueryRewrites is set to ``generative``, the query terms are sent - to a generate model which will produce 10 (default) rewrites to help increase the recall of the - request. The requested count can be configured by appending the pipe character ``|`` followed - by the ``count-`` option, such as ``generative|count-3``. Defaults to - ``None``. This parameter is only valid if the query type is ``semantic``. Known values are: - "none" and "generative". Default value is None. - :paramtype query_rewrites: str or ~azure.search.documents.models.QueryRewritesType :keyword debug: Enables a debugging tool that can be used to further explore your search results. Known values are: "disabled", "semantic", "vector", "queryRewrites", "innerHits", and "all". Default value is None. :paramtype debug: str or ~azure.search.documents.models.QueryDebugMode - :keyword query_language: The language of the query. 
Known values are: "none", "en-us", "en-gb", - "en-in", "en-ca", "en-au", "fr-fr", "fr-ca", "de-de", "es-es", "es-mx", "zh-cn", "zh-tw", - "pt-br", "pt-pt", "it-it", "ja-jp", "ko-kr", "ru-ru", "cs-cz", "nl-be", "nl-nl", "hu-hu", - "pl-pl", "sv-se", "tr-tr", "hi-in", "ar-sa", "ar-eg", "ar-ma", "ar-kw", "ar-jo", "da-dk", - "no-no", "bg-bg", "hr-hr", "hr-ba", "ms-my", "ms-bn", "sl-sl", "ta-in", "vi-vn", "el-gr", - "ro-ro", "is-is", "id-id", "th-th", "lt-lt", "uk-ua", "lv-lv", "et-ee", "ca-es", "fi-fi", - "sr-ba", "sr-me", "sr-rs", "sk-sk", "nb-no", "hy-am", "bn-in", "eu-es", "gl-es", "gu-in", - "he-il", "ga-ie", "kn-in", "ml-in", "mr-in", "fa-ae", "pa-in", "te-in", and "ur-pk". Default - value is None. - :paramtype query_language: str or ~azure.search.documents.models.QueryLanguage - :keyword speller: Improve search recall by spell-correcting individual search query terms. - Known values are: "none" and "lexicon". Default value is None. - :paramtype speller: str or ~azure.search.documents.models.QuerySpellerType - :keyword semantic_fields: The list of field names used for semantic ranking. Default value is - None. - :paramtype semantic_fields: list[str] :return: SearchDocumentsResult. 
The SearchDocumentsResult is compatible with MutableMapping :rtype: ~azure.search.documents.models.SearchDocumentsResult :raises ~azure.core.exceptions.HttpResponseError: @@ -751,8 +681,6 @@ def _search_get( # pylint: disable=too-many-locals _request = build_search_search_get_request( index_name=self._config.index_name, - query_source_authorization=query_source_authorization, - enable_elevated_read=enable_elevated_read, search_text=search_text, include_total_result_count=include_total_result_count, facets=facets, @@ -778,11 +706,7 @@ def _search_get( # pylint: disable=too-many-locals answers=answers, captions=captions, semantic_query=semantic_query, - query_rewrites=query_rewrites, debug=debug, - query_language=query_language, - speller=speller, - semantic_fields=semantic_fields, api_version=self._config.api_version, headers=_headers, params=_params, @@ -792,6 +716,7 @@ def _search_get( # pylint: disable=too-many-locals } _request.url = self._client.format_url(_request.url, **path_format_arguments) + _decompress = kwargs.pop("decompress", True) _stream = kwargs.pop("stream", False) pipeline_response: PipelineResponse = self._client._pipeline.run( # pylint: disable=protected-access _request, stream=_stream, **kwargs @@ -813,7 +738,7 @@ def _search_get( # pylint: disable=too-many-locals raise HttpResponseError(response=response, model=error) if _stream: - deserialized = response.iter_bytes() + deserialized = response.iter_bytes() if _decompress else response.iter_raw() else: deserialized = _deserialize(_models1.SearchDocumentsResult, response.json()) @@ -826,8 +751,6 @@ def _search_get( # pylint: disable=too-many-locals def _search_post( # pylint: disable=too-many-locals self, *, - query_source_authorization: Optional[str] = None, - enable_elevated_read: Optional[bool] = None, content_type: str = "application/json", include_total_count: Optional[bool] = None, facets: Optional[list[str]] = None, @@ -846,8 +769,6 @@ def _search_post( # pylint: disable=too-many-locals 
search_text: Optional[str] = None, search_fields: Optional[list[str]] = None, search_mode: Optional[Union[str, _models1.SearchMode]] = None, - query_language: Optional[Union[str, _models1.QueryLanguage]] = None, - query_speller: Optional[Union[str, _models1.QuerySpellerType]] = None, select: Optional[list[str]] = None, skip: Optional[int] = None, top: Optional[int] = None, @@ -857,32 +778,17 @@ def _search_post( # pylint: disable=too-many-locals semantic_query: Optional[str] = None, answers: Optional[Union[str, _models1.QueryAnswerType]] = None, captions: Optional[Union[str, _models1.QueryCaptionType]] = None, - query_rewrites: Optional[Union[str, _models1.QueryRewritesType]] = None, - semantic_fields: Optional[list[str]] = None, vector_queries: Optional[list[_models1.VectorQuery]] = None, vector_filter_mode: Optional[Union[str, _models1.VectorFilterMode]] = None, - hybrid_search: Optional[_models1.HybridSearch] = None, **kwargs: Any ) -> _models1.SearchDocumentsResult: ... @overload def _search_post( - self, - body: JSON, - *, - query_source_authorization: Optional[str] = None, - enable_elevated_read: Optional[bool] = None, - content_type: str = "application/json", - **kwargs: Any + self, body: JSON, *, content_type: str = "application/json", **kwargs: Any ) -> _models1.SearchDocumentsResult: ... @overload def _search_post( - self, - body: IO[bytes], - *, - query_source_authorization: Optional[str] = None, - enable_elevated_read: Optional[bool] = None, - content_type: str = "application/json", - **kwargs: Any + self, body: IO[bytes], *, content_type: str = "application/json", **kwargs: Any ) -> _models1.SearchDocumentsResult: ... 
@distributed_trace @@ -890,8 +796,6 @@ def _search_post( # pylint: disable=too-many-locals self, body: Union[JSON, IO[bytes]] = _Unset, *, - query_source_authorization: Optional[str] = None, - enable_elevated_read: Optional[bool] = None, include_total_count: Optional[bool] = None, facets: Optional[list[str]] = None, filter: Optional[str] = None, @@ -909,8 +813,6 @@ def _search_post( # pylint: disable=too-many-locals search_text: Optional[str] = None, search_fields: Optional[list[str]] = None, search_mode: Optional[Union[str, _models1.SearchMode]] = None, - query_language: Optional[Union[str, _models1.QueryLanguage]] = None, - query_speller: Optional[Union[str, _models1.QuerySpellerType]] = None, select: Optional[list[str]] = None, skip: Optional[int] = None, top: Optional[int] = None, @@ -920,24 +822,14 @@ def _search_post( # pylint: disable=too-many-locals semantic_query: Optional[str] = None, answers: Optional[Union[str, _models1.QueryAnswerType]] = None, captions: Optional[Union[str, _models1.QueryCaptionType]] = None, - query_rewrites: Optional[Union[str, _models1.QueryRewritesType]] = None, - semantic_fields: Optional[list[str]] = None, vector_queries: Optional[list[_models1.VectorQuery]] = None, vector_filter_mode: Optional[Union[str, _models1.VectorFilterMode]] = None, - hybrid_search: Optional[_models1.HybridSearch] = None, **kwargs: Any ) -> _models1.SearchDocumentsResult: """Searches for documents in the index. :param body: Is either a JSON type or a IO[bytes] type. Required. :type body: JSON or IO[bytes] - :keyword query_source_authorization: Token identifying the user for which the query is being - executed. This token is used to enforce security restrictions on documents. Default value is - None. - :paramtype query_source_authorization: str - :keyword enable_elevated_read: A value that enables elevated read that bypass document level - permission checks for the query operation. Default value is None. 
- :paramtype enable_elevated_read: bool :keyword include_total_count: A value that specifies whether to fetch the total count of results. Default is false. Setting this value to true may have a performance impact. Note that the count returned is an approximation. Default value is None. @@ -1011,19 +903,6 @@ def _search_post( # pylint: disable=too-many-locals matched in order to count the document as a match. Known values are: "any" and "all". Default value is None. :paramtype search_mode: str or ~azure.search.documents.models.SearchMode - :keyword query_language: A value that specifies the language of the search query. Known values - are: "none", "en-us", "en-gb", "en-in", "en-ca", "en-au", "fr-fr", "fr-ca", "de-de", "es-es", - "es-mx", "zh-cn", "zh-tw", "pt-br", "pt-pt", "it-it", "ja-jp", "ko-kr", "ru-ru", "cs-cz", - "nl-be", "nl-nl", "hu-hu", "pl-pl", "sv-se", "tr-tr", "hi-in", "ar-sa", "ar-eg", "ar-ma", - "ar-kw", "ar-jo", "da-dk", "no-no", "bg-bg", "hr-hr", "hr-ba", "ms-my", "ms-bn", "sl-sl", - "ta-in", "vi-vn", "el-gr", "ro-ro", "is-is", "id-id", "th-th", "lt-lt", "uk-ua", "lv-lv", - "et-ee", "ca-es", "fi-fi", "sr-ba", "sr-me", "sr-rs", "sk-sk", "nb-no", "hy-am", "bn-in", - "eu-es", "gl-es", "gu-in", "he-il", "ga-ie", "kn-in", "ml-in", "mr-in", "fa-ae", "pa-in", - "te-in", and "ur-pk". Default value is None. - :paramtype query_language: str or ~azure.search.documents.models.QueryLanguage - :keyword query_speller: A value that specifies the type of the speller to use to spell-correct - individual search query terms. Known values are: "none" and "lexicon". Default value is None. - :paramtype query_speller: str or ~azure.search.documents.models.QuerySpellerType :keyword select: The comma-separated list of fields to retrieve. If unspecified, all fields marked as retrievable in the schema are included. Default value is None. 
:paramtype select: list[str] @@ -1059,12 +938,6 @@ def _search_post( # pylint: disable=too-many-locals :keyword captions: A value that specifies whether captions should be returned as part of the search response. Known values are: "none" and "extractive". Default value is None. :paramtype captions: str or ~azure.search.documents.models.QueryCaptionType - :keyword query_rewrites: A value that specifies whether query rewrites should be generated to - augment the search query. Known values are: "none" and "generative". Default value is None. - :paramtype query_rewrites: str or ~azure.search.documents.models.QueryRewritesType - :keyword semantic_fields: The comma-separated list of field names used for semantic ranking. - Default value is None. - :paramtype semantic_fields: list[str] :keyword vector_queries: The query parameters for vector and hybrid search queries. Default value is None. :paramtype vector_queries: list[~azure.search.documents.models.VectorQuery] @@ -1072,9 +945,6 @@ def _search_post( # pylint: disable=too-many-locals vector search is performed. Default is 'preFilter' for new indexes. Known values are: "postFilter", "preFilter", and "strictPostFilter". Default value is None. :paramtype vector_filter_mode: str or ~azure.search.documents.models.VectorFilterMode - :keyword hybrid_search: The query parameters to configure hybrid search behaviors. Default - value is None. - :paramtype hybrid_search: ~azure.search.documents.models.HybridSearch :return: SearchDocumentsResult. 
The SearchDocumentsResult is compatible with MutableMapping :rtype: ~azure.search.documents.models.SearchDocumentsResult :raises ~azure.core.exceptions.HttpResponseError: @@ -1104,11 +974,8 @@ def _search_post( # pylint: disable=too-many-locals "highlight": highlight_fields, "highlightPostTag": highlight_post_tag, "highlightPreTag": highlight_pre_tag, - "hybridSearch": hybrid_search, "minimumCoverage": minimum_coverage, "orderby": order_by, - "queryLanguage": query_language, - "queryRewrites": query_rewrites, "queryType": query_type, "scoringParameters": scoring_parameters, "scoringProfile": scoring_profile, @@ -1119,12 +986,10 @@ def _search_post( # pylint: disable=too-many-locals "select": select, "semanticConfiguration": semantic_configuration_name, "semanticErrorHandling": semantic_error_handling, - "semanticFields": semantic_fields, "semanticMaxWaitInMilliseconds": semantic_max_wait_in_milliseconds, "semanticQuery": semantic_query, "sessionId": session_id, "skip": skip, - "speller": query_speller, "top": top, "vectorFilterMode": vector_filter_mode, "vectorQueries": vector_queries, @@ -1139,8 +1004,6 @@ def _search_post( # pylint: disable=too-many-locals _request = build_search_search_post_request( index_name=self._config.index_name, - query_source_authorization=query_source_authorization, - enable_elevated_read=enable_elevated_read, content_type=content_type, api_version=self._config.api_version, content=_content, @@ -1152,6 +1015,7 @@ def _search_post( # pylint: disable=too-many-locals } _request.url = self._client.format_url(_request.url, **path_format_arguments) + _decompress = kwargs.pop("decompress", True) _stream = kwargs.pop("stream", False) pipeline_response: PipelineResponse = self._client._pipeline.run( # pylint: disable=protected-access _request, stream=_stream, **kwargs @@ -1173,7 +1037,7 @@ def _search_post( # pylint: disable=too-many-locals raise HttpResponseError(response=response, model=error) if _stream: - deserialized = response.iter_bytes() 
+ deserialized = response.iter_bytes() if _decompress else response.iter_raw() else: deserialized = _deserialize(_models1.SearchDocumentsResult, response.json()) @@ -1184,25 +1048,12 @@ def _search_post( # pylint: disable=too-many-locals @distributed_trace def get_document( - self, - key: str, - *, - query_source_authorization: Optional[str] = None, - enable_elevated_read: Optional[bool] = None, - selected_fields: Optional[list[str]] = None, - **kwargs: Any + self, key: str, *, selected_fields: Optional[list[str]] = None, **kwargs: Any ) -> _models1.LookupDocument: """Retrieves a document from the index. :param key: The key of the document to retrieve. Required. :type key: str - :keyword query_source_authorization: Token identifying the user for which the query is being - executed. This token is used to enforce security restrictions on documents. Default value is - None. - :paramtype query_source_authorization: str - :keyword enable_elevated_read: A value that enables elevated read that bypass document level - permission checks for the query operation. Default value is None. - :paramtype enable_elevated_read: bool :keyword selected_fields: List of field names to retrieve for the document; Any field not retrieved will be missing from the returned document. Default value is None. 
:paramtype selected_fields: list[str] @@ -1226,8 +1077,6 @@ def get_document( _request = build_search_get_document_request( key=key, index_name=self._config.index_name, - query_source_authorization=query_source_authorization, - enable_elevated_read=enable_elevated_read, selected_fields=selected_fields, api_version=self._config.api_version, headers=_headers, @@ -1238,6 +1087,7 @@ def get_document( } _request.url = self._client.format_url(_request.url, **path_format_arguments) + _decompress = kwargs.pop("decompress", True) _stream = kwargs.pop("stream", False) pipeline_response: PipelineResponse = self._client._pipeline.run( # pylint: disable=protected-access _request, stream=_stream, **kwargs @@ -1259,7 +1109,7 @@ def get_document( raise HttpResponseError(response=response, model=error) if _stream: - deserialized = response.iter_bytes() + deserialized = response.iter_bytes() if _decompress else response.iter_raw() else: deserialized = _deserialize(_models1.LookupDocument, response.json()) @@ -1370,6 +1220,7 @@ def _suggest_get( } _request.url = self._client.format_url(_request.url, **path_format_arguments) + _decompress = kwargs.pop("decompress", True) _stream = kwargs.pop("stream", False) pipeline_response: PipelineResponse = self._client._pipeline.run( # pylint: disable=protected-access _request, stream=_stream, **kwargs @@ -1391,7 +1242,7 @@ def _suggest_get( raise HttpResponseError(response=response, model=error) if _stream: - deserialized = response.iter_bytes() + deserialized = response.iter_bytes() if _decompress else response.iter_raw() else: deserialized = _deserialize( _models1._models.SuggestDocumentsResult, response.json() # pylint: disable=protected-access @@ -1552,6 +1403,7 @@ def _suggest_post( # pylint: disable=too-many-locals } _request.url = self._client.format_url(_request.url, **path_format_arguments) + _decompress = kwargs.pop("decompress", True) _stream = kwargs.pop("stream", False) pipeline_response: PipelineResponse = 
self._client._pipeline.run( # pylint: disable=protected-access _request, stream=_stream, **kwargs @@ -1573,7 +1425,7 @@ def _suggest_post( # pylint: disable=too-many-locals raise HttpResponseError(response=response, model=error) if _stream: - deserialized = response.iter_bytes() + deserialized = response.iter_bytes() if _decompress else response.iter_raw() else: deserialized = _deserialize( _models1._models.SuggestDocumentsResult, response.json() # pylint: disable=protected-access @@ -1644,6 +1496,7 @@ def _index( } _request.url = self._client.format_url(_request.url, **path_format_arguments) + _decompress = kwargs.pop("decompress", True) _stream = kwargs.pop("stream", False) pipeline_response: PipelineResponse = self._client._pipeline.run( # pylint: disable=protected-access _request, stream=_stream, **kwargs @@ -1665,7 +1518,7 @@ def _index( raise HttpResponseError(response=response, model=error) if _stream: - deserialized = response.iter_bytes() + deserialized = response.iter_bytes() if _decompress else response.iter_raw() else: deserialized = _deserialize( _models1._models.IndexDocumentsResult, response.json() # pylint: disable=protected-access @@ -1682,7 +1535,7 @@ def _autocomplete_get( *, search_text: str, suggester_name: str, - autocomplete_mode: Optional[Union[str, _models1.AutocompleteMode]] = None, + autocomplete_mode: Optional[Union[str, _models1._enums.AutocompleteMode]] = None, filter: Optional[str] = None, use_fuzzy_matching: Optional[bool] = None, highlight_post_tag: Optional[str] = None, @@ -1768,6 +1621,7 @@ def _autocomplete_get( } _request.url = self._client.format_url(_request.url, **path_format_arguments) + _decompress = kwargs.pop("decompress", True) _stream = kwargs.pop("stream", False) pipeline_response: PipelineResponse = self._client._pipeline.run( # pylint: disable=protected-access _request, stream=_stream, **kwargs @@ -1789,7 +1643,7 @@ def _autocomplete_get( raise HttpResponseError(response=response, model=error) if _stream: - 
deserialized = response.iter_bytes() + deserialized = response.iter_bytes() if _decompress else response.iter_raw() else: deserialized = _deserialize( _models1._models.AutocompleteResult, response.json() # pylint: disable=protected-access @@ -1807,7 +1661,7 @@ def _autocomplete_post( search_text: str, suggester_name: str, content_type: str = "application/json", - autocomplete_mode: Optional[Union[str, _models1.AutocompleteMode]] = None, + autocomplete_mode: Optional[Union[str, _models1._enums.AutocompleteMode]] = None, filter: Optional[str] = None, use_fuzzy_matching: Optional[bool] = None, highlight_post_tag: Optional[str] = None, @@ -1833,7 +1687,7 @@ def _autocomplete_post( # pylint: disable=too-many-locals *, search_text: str = _Unset, suggester_name: str = _Unset, - autocomplete_mode: Optional[Union[str, _models1.AutocompleteMode]] = None, + autocomplete_mode: Optional[Union[str, _models1._enums.AutocompleteMode]] = None, filter: Optional[str] = None, use_fuzzy_matching: Optional[bool] = None, highlight_post_tag: Optional[str] = None, @@ -1940,6 +1794,7 @@ def _autocomplete_post( # pylint: disable=too-many-locals } _request.url = self._client.format_url(_request.url, **path_format_arguments) + _decompress = kwargs.pop("decompress", True) _stream = kwargs.pop("stream", False) pipeline_response: PipelineResponse = self._client._pipeline.run( # pylint: disable=protected-access _request, stream=_stream, **kwargs @@ -1961,7 +1816,7 @@ def _autocomplete_post( # pylint: disable=too-many-locals raise HttpResponseError(response=response, model=error) if _stream: - deserialized = response.iter_bytes() + deserialized = response.iter_bytes() if _decompress else response.iter_raw() else: deserialized = _deserialize( _models1._models.AutocompleteResult, response.json() # pylint: disable=protected-access diff --git a/sdk/search/azure-search-documents/azure/search/documents/_operations/_patch.py b/sdk/search/azure-search-documents/azure/search/documents/_operations/_patch.py 
index 95ae9f41edb2..7bcab5f499ca 100644 --- a/sdk/search/azure-search-documents/azure/search/documents/_operations/_patch.py +++ b/sdk/search/azure-search-documents/azure/search/documents/_operations/_patch.py @@ -104,14 +104,11 @@ def _build_search_request( semantic_query: Optional[str] = None, search_fields: Optional[List[str]] = None, search_mode: Optional[Union[str, _models.SearchMode]] = None, - query_language: Optional[Union[str, _models.QueryLanguage]] = None, - query_speller: Optional[Union[str, _models.QuerySpellerType]] = None, query_answer: Optional[Union[str, _models.QueryAnswerType]] = None, query_answer_count: Optional[int] = None, query_answer_threshold: Optional[float] = None, query_caption: Optional[Union[str, _models.QueryCaptionType]] = None, query_caption_highlight_enabled: Optional[bool] = None, - semantic_fields: Optional[List[str]] = None, semantic_configuration_name: Optional[str] = None, select: Optional[List[str]] = None, skip: Optional[int] = None, @@ -122,10 +119,7 @@ def _build_search_request( vector_filter_mode: Optional[Union[str, _models.VectorFilterMode]] = None, semantic_error_mode: Optional[Union[str, _models.SemanticErrorMode]] = None, semantic_max_wait_in_milliseconds: Optional[int] = None, - query_rewrites: Optional[Union[str, _models.QueryRewritesType]] = None, - query_rewrites_count: Optional[int] = None, debug: Optional[Union[str, _models.QueryDebugMode]] = None, - hybrid_search: Optional[_models.HybridSearch] = None, ) -> _models.SearchRequest: # pylint:disable=too-many-locals """Build a SearchRequest from search parameters. @@ -153,15 +147,11 @@ def _build_search_request( this parameter. :paramtype search_fields: list[str] :keyword search_mode: The search mode to use for the search query. - :keyword query_language: The language of the search query. - :keyword query_speller: The type of spell checking to use for the search query. :keyword query_answer: The type of answers to retrieve for a semantic search query. 
:keyword int query_answer_count: The maximum number of answers to retrieve. :keyword float query_answer_threshold: The confidence score threshold for answers to be included in the results. :keyword query_caption: The type of captions to retrieve for a semantic search query. :keyword bool query_caption_highlight_enabled: A value indicating whether caption highlights are enabled. - :keyword semantic_fields: The comma-separated list of field names used for semantic ranking. - :paramtype semantic_fields: list[str] :keyword str semantic_configuration_name: The name of the semantic configuration to use for the search. :keyword list[str] select: The list of field names to retrieve in the search results. :keyword int skip: The number of search results to skip. @@ -172,10 +162,7 @@ def _build_search_request( :keyword vector_filter_mode: The vector filter mode to use for the search query. :keyword semantic_error_mode: The semantic error handling mode to use for the search query. :keyword int semantic_max_wait_in_milliseconds: The maximum wait time in milliseconds for semantic search. - :keyword query_rewrites: The type of query rewrites to apply for the search query. - :keyword int query_rewrites_count: The maximum number of query rewrites to apply. :keyword debug: The debug mode for the search query. - :keyword hybrid_search: The hybrid search configuration for the search query. 
:return: SearchRequest :rtype: ~azure.search.documents.models.SearchRequest """ @@ -194,12 +181,6 @@ def _build_search_request( if query_caption_highlight_enabled is not None: captions = f"{captions}|highlight-{str(query_caption_highlight_enabled).lower()}" - rewrites = None - if query_rewrites: - rewrites = str(query_rewrites) - if query_rewrites_count is not None: - rewrites = f"{rewrites}|count-{query_rewrites_count}" - # Convert highlight_fields from comma-separated string to list highlight_fields_list: Optional[List[str]] = None if highlight_fields is not None: @@ -222,11 +203,8 @@ def _build_search_request( semantic_query=semantic_query, search_fields=search_fields, search_mode=search_mode, - query_language=query_language, - query_speller=query_speller, answers=answers, captions=captions, - semantic_fields=semantic_fields, semantic_configuration_name=semantic_configuration_name, select=select, skip=skip, @@ -237,9 +215,7 @@ def _build_search_request( vector_filter_mode=vector_filter_mode, semantic_error_handling=semantic_error_mode, semantic_max_wait_in_milliseconds=semantic_max_wait_in_milliseconds, - query_rewrites=rewrites, debug=debug, - hybrid_search=hybrid_search, ) @@ -256,7 +232,7 @@ def __init__(self, client, initial_request: _models.SearchRequest, kwargs, conti self._initial_request = initial_request self._kwargs = kwargs self._facets: Optional[Dict[str, List[Dict[str, Any]]]] = None - self._api_version = kwargs.get("api_version", "2025-11-01-preview") + self._api_version = kwargs.get("api_version", "2026-04-01") def _get_next_cb(self, continuation_token): if continuation_token is None: @@ -300,12 +276,6 @@ def get_answers(self) -> Optional[List[_models.QueryAnswerResult]]: response = cast(_models.SearchDocumentsResult, self._response) return cast(Optional[List[_models.QueryAnswerResult]], response.answers) - @_ensure_response - def get_debug_info(self) -> Optional[_models.DebugInfo]: - self.continuation_token = None - response = 
cast(_models.SearchDocumentsResult, self._response) - return response.debug_info - class SearchItemPaged(ItemPaged[ReturnType]): """A pageable list of search results.""" @@ -360,14 +330,6 @@ def get_answers(self) -> Optional[List[_models.QueryAnswerResult]]: """ return cast(Optional[List[_models.QueryAnswerResult]], self._first_iterator_instance().get_answers()) - def get_debug_info(self) -> _models.DebugInfo: - """Return the debug information for the query. - - :return: the debug information for the query - :rtype: ~azure.search.documents.models.DebugInfo - """ - return cast(_models.DebugInfo, self._first_iterator_instance().get_debug_info()) - class _SearchClientOperationsMixin(_SearchClientOperationsMixinGenerated): """SearchClient operations mixin customizations.""" @@ -543,14 +505,11 @@ def search( semantic_query: Optional[str] = None, search_fields: Optional[List[str]] = None, search_mode: Optional[Union[str, _models.SearchMode]] = None, - query_language: Optional[Union[str, _models.QueryLanguage]] = None, - query_speller: Optional[Union[str, _models.QuerySpellerType]] = None, query_answer: Optional[Union[str, _models.QueryAnswerType]] = None, query_answer_count: Optional[int] = None, query_answer_threshold: Optional[float] = None, query_caption: Optional[Union[str, _models.QueryCaptionType]] = None, query_caption_highlight_enabled: Optional[bool] = None, - semantic_fields: Optional[List[str]] = None, semantic_configuration_name: Optional[str] = None, select: Optional[List[str]] = None, skip: Optional[int] = None, @@ -561,12 +520,7 @@ def search( vector_filter_mode: Optional[Union[str, _models.VectorFilterMode]] = None, semantic_error_mode: Optional[Union[str, _models.SemanticErrorMode]] = None, semantic_max_wait_in_milliseconds: Optional[int] = None, - query_rewrites: Optional[Union[str, _models.QueryRewritesType]] = None, - query_rewrites_count: Optional[int] = None, debug: Optional[Union[str, _models.QueryDebugMode]] = None, - hybrid_search: 
Optional[_models.HybridSearch] = None, - query_source_authorization: Optional[str] = None, - enable_elevated_read: Optional[bool] = None, **kwargs: Any, ) -> SearchItemPaged[Dict]: # pylint:disable=too-many-locals @@ -620,18 +574,6 @@ def search( :keyword search_mode: A value that specifies whether any or all of the search terms must be matched in order to count the document as a match. Possible values include: 'any', 'all'. :paramtype search_mode: str or ~azure.search.documents.models.SearchMode - :keyword query_language: The language of the search query. Possible values include: "none", "en-us", - "en-gb", "en-in", "en-ca", "en-au", "fr-fr", "fr-ca", "de-de", "es-es", "es-mx", "zh-cn", - "zh-tw", "pt-br", "pt-pt", "it-it", "ja-jp", "ko-kr", "ru-ru", "cs-cz", "nl-be", "nl-nl", - "hu-hu", "pl-pl", "sv-se", "tr-tr", "hi-in", "ar-sa", "ar-eg", "ar-ma", "ar-kw", "ar-jo", - "da-dk", "no-no", "bg-bg", "hr-hr", "hr-ba", "ms-my", "ms-bn", "sl-sl", "ta-in", "vi-vn", - "el-gr", "ro-ro", "is-is", "id-id", "th-th", "lt-lt", "uk-ua", "lv-lv", "et-ee", "ca-es", - "fi-fi", "sr-ba", "sr-me", "sr-rs", "sk-sk", "nb-no", "hy-am", "bn-in", "eu-es", "gl-es", - "gu-in", "he-il", "ga-ie", "kn-in", "ml-in", "mr-in", "fa-ae", "pa-in", "te-in", "ur-pk". - :paramtype query_language: str or ~azure.search.documents.models.QueryLanguage - :keyword query_speller: A value that specified the type of the speller to use to spell-correct - individual search query terms. Possible values include: "none", "lexicon". - :paramtype query_speller: str or ~azure.search.documents.models.QuerySpellerType :keyword query_answer: This parameter is only valid if the query type is 'semantic'. If set, the query returns answers extracted from key passages in the highest ranked documents. Possible values include: "none", "extractive". @@ -647,8 +589,6 @@ def search( :keyword bool query_caption_highlight_enabled: This parameter is only valid if the query type is 'semantic' when query caption is set to 'extractive'. 
Determines whether highlighting is enabled. Defaults to 'true'. - :keyword semantic_fields: The comma-separated list of field names used for semantic ranking. - :paramtype semantic_fields: list[str] :keyword semantic_configuration_name: The name of the semantic configuration that will be used when processing documents for queries of type semantic. :paramtype semantic_configuration_name: str @@ -680,15 +620,6 @@ def search( :paramtype semantic_error_mode: str or ~azure.search.documents.models.SemanticErrorMode :keyword int semantic_max_wait_in_milliseconds: Allows the user to set an upper bound on the amount of time it takes for semantic enrichment to finish processing before the request fails. - :keyword query_rewrites: When QueryRewrites is set to ``generative``\\ , the query terms are sent - to a generate model which will produce 10 (default) rewrites to help increase the recall of the - request. The requested count can be configured by appending the pipe character ``|`` followed - by the ``count-`` option, such as ``generative|count-3``. Defaults to - ``None``. This parameter is only valid if the query type is ``semantic``. Known values are: - "none" and "generative". - :paramtype query_rewrites: str or ~azure.search.documents.models.QueryRewritesType - :keyword int query_rewrites_count: This parameter is only valid if the query rewrites type is 'generative'. - Configures the number of rewrites returned. Default count is 10. :keyword debug: Enables a debugging tool that can be used to further explore your Semantic search results. Known values are: "disabled", "speller", "semantic", and "all". :paramtype debug: str or ~azure.search.documents.models.QueryDebugMode @@ -697,15 +628,6 @@ def search( :keyword vector_filter_mode: Determines whether or not filters are applied before or after the vector search is performed. Default is 'preFilter'. Known values are: "postFilter" and "preFilter". 
:paramtype vector_filter_mode: str or ~azure.search.documents.models.VectorFilterMode - :keyword hybrid_search: The query parameters to configure hybrid search behaviors. - :paramtype hybrid_search: ~azure.search.documents.models.HybridSearch - :keyword query_source_authorization: Token identifying the user for which the query is being - executed. This token is used to enforce security restrictions on documents. Default value is - None. - :paramtype query_source_authorization: str - :keyword enable_elevated_read: A value that enables elevated read that bypass document level - permission checks for the query operation. Default value is None. - :paramtype enable_elevated_read: bool :return: List of search results. :rtype: SearchItemPaged[dict] @@ -753,14 +675,11 @@ def search( semantic_query=semantic_query, search_fields=search_fields, search_mode=search_mode, - query_language=query_language, - query_speller=query_speller, query_answer=query_answer, query_answer_count=query_answer_count, query_answer_threshold=query_answer_threshold, query_caption=query_caption, query_caption_highlight_enabled=query_caption_highlight_enabled, - semantic_fields=semantic_fields, semantic_configuration_name=semantic_configuration_name, select=select, skip=skip, @@ -771,18 +690,11 @@ def search( vector_filter_mode=vector_filter_mode, semantic_error_mode=semantic_error_mode, semantic_max_wait_in_milliseconds=semantic_max_wait_in_milliseconds, - query_rewrites=query_rewrites, - query_rewrites_count=query_rewrites_count, debug=debug, - hybrid_search=hybrid_search, ) # Create kwargs for the search_post call search_kwargs = dict(kwargs) - if query_source_authorization is not None: - search_kwargs["query_source_authorization"] = query_source_authorization - if enable_elevated_read is not None: - search_kwargs["enable_elevated_read"] = enable_elevated_read return SearchItemPaged(self, search_request, search_kwargs, page_iterator_class=SearchPageIterator) diff --git 
a/sdk/search/azure-search-documents/azure/search/documents/_patch.py b/sdk/search/azure-search-documents/azure/search/documents/_patch.py index 32228f20cf50..391b87a4a8ff 100644 --- a/sdk/search/azure-search-documents/azure/search/documents/_patch.py +++ b/sdk/search/azure-search-documents/azure/search/documents/_patch.py @@ -40,10 +40,10 @@ class ApiVersion(str, Enum, metaclass=CaseInsensitiveEnumMeta): V2023_11_01 = "2023-11-01" V2024_07_01 = "2024-07-01" V2025_09_01 = "2025-09-01" - V2025_11_01_PREVIEW = "2025-11-01-preview" + V2026_04_01 = "2026-04-01" -DEFAULT_VERSION = ApiVersion.V2025_11_01_PREVIEW +DEFAULT_VERSION = ApiVersion.V2026_04_01 class SearchClient(_SearchClient): @@ -58,7 +58,7 @@ class SearchClient(_SearchClient): :param index_name: The name of the index. Required. :type index_name: str :keyword api_version: The API version to use for this operation. Default value is - "2025-11-01-preview". Note that overriding this default value may result in unsupported + "2026-04-01". Note that overriding this default value may result in unsupported behavior. 
:paramtype api_version: str """ diff --git a/sdk/search/azure-search-documents/azure/search/documents/_utils/model_base.py b/sdk/search/azure-search-documents/azure/search/documents/_utils/model_base.py index c402af2afc63..f8511ab0c707 100644 --- a/sdk/search/azure-search-documents/azure/search/documents/_utils/model_base.py +++ b/sdk/search/azure-search-documents/azure/search/documents/_utils/model_base.py @@ -22,7 +22,7 @@ from datetime import datetime, date, time, timedelta, timezone from json import JSONEncoder import xml.etree.ElementTree as ET -from collections.abc import MutableMapping +from collections.abc import MutableMapping # pylint: disable=import-error from typing_extensions import Self import isodate from azure.core.exceptions import DeserializationError @@ -515,6 +515,8 @@ def setdefault(self, key: str, default: typing.Any = _UNSET) -> typing.Any: return self._data.setdefault(key, default) def __eq__(self, other: typing.Any) -> bool: + if isinstance(other, _MyMutableMapping): + return self._data == other._data try: other_model = self.__class__(other) except Exception: @@ -628,6 +630,9 @@ def __init__(self, *args: typing.Any, **kwargs: typing.Any) -> None: if len(items) > 0: existed_attr_keys.append(xml_name) dict_to_pass[rf._rest_name] = _deserialize(rf._type, items) + elif not rf._is_optional: + existed_attr_keys.append(xml_name) + dict_to_pass[rf._rest_name] = [] continue # text element is primitive type @@ -690,7 +695,7 @@ def __new__(cls, *args: typing.Any, **kwargs: typing.Any) -> Self: cls._attr_to_rest_field: dict[str, _RestField] = dict(attr_to_rest_field.items()) cls._calculated.add(f"{cls.__module__}.{cls.__qualname__}") - return super().__new__(cls) + return super().__new__(cls) # pylint: disable=no-value-for-parameter def __init_subclass__(cls, discriminator: typing.Optional[str] = None) -> None: for base in cls.__bases__: @@ -889,6 +894,8 @@ def _get_deserialize_callable_from_annotation( # pylint: disable=too-many-retur # is it 
optional? try: if any(a is _NONE_TYPE for a in annotation.__args__): # pyright: ignore + if rf: + rf._is_optional = True if len(annotation.__args__) <= 2: # pyright: ignore if_obj_deserializer = _get_deserialize_callable_from_annotation( next(a for a in annotation.__args__ if a is not _NONE_TYPE), module, rf # pyright: ignore @@ -981,16 +988,20 @@ def _deserialize_with_callable( return float(value.text) if value.text else None if deserializer is bool: return value.text == "true" if value.text else None + if deserializer and deserializer in _DESERIALIZE_MAPPING.values(): + return deserializer(value.text) if value.text else None + if deserializer and deserializer in _DESERIALIZE_MAPPING_WITHFORMAT.values(): + return deserializer(value.text) if value.text else None if deserializer is None: return value if deserializer in [int, float, bool]: return deserializer(value) if isinstance(deserializer, CaseInsensitiveEnumMeta): try: - return deserializer(value) + return deserializer(value.text if isinstance(value, ET.Element) else value) except ValueError: # for unknown value, return raw value - return value + return value.text if isinstance(value, ET.Element) else value if isinstance(deserializer, type) and issubclass(deserializer, Model): return deserializer._deserialize(value, []) return typing.cast(typing.Callable[[typing.Any], typing.Any], deserializer)(value) @@ -1043,6 +1054,7 @@ def _failsafe_deserialize_xml( return None +# pylint: disable=too-many-instance-attributes class _RestField: def __init__( self, @@ -1062,6 +1074,7 @@ def __init__( self._is_discriminator = is_discriminator self._visibility = visibility self._is_model = False + self._is_optional = False self._default = default self._format = format self._is_multipart_file_input = is_multipart_file_input diff --git a/sdk/search/azure-search-documents/azure/search/documents/_version.py b/sdk/search/azure-search-documents/azure/search/documents/_version.py index a0339c33dc8b..6d71262c832f 100644 --- 
a/sdk/search/azure-search-documents/azure/search/documents/_version.py +++ b/sdk/search/azure-search-documents/azure/search/documents/_version.py @@ -3,6 +3,6 @@ # Licensed under the MIT License. # ------------------------------------ -VERSION = "11.7.0b3" # type: str +VERSION = "11.7.0" # type: str SDK_MONIKER = "search-documents/{}".format(VERSION) # type: str diff --git a/sdk/search/azure-search-documents/azure/search/documents/aio/_client.py b/sdk/search/azure-search-documents/azure/search/documents/aio/_client.py index e2aa4dc24ac6..ffdcbeeaa1e1 100644 --- a/sdk/search/azure-search-documents/azure/search/documents/aio/_client.py +++ b/sdk/search/azure-search-documents/azure/search/documents/aio/_client.py @@ -26,7 +26,7 @@ class SearchClient(_SearchClientOperationsMixin): """SearchClient. - :param endpoint: Service host. Required. + :param endpoint: The endpoint URL of the search service. Required. :type endpoint: str :param credential: Credential used to authenticate requests to the service. Is either a key credential type or a token credential type. Required. @@ -34,9 +34,9 @@ class SearchClient(_SearchClientOperationsMixin): ~azure.core.credentials_async.AsyncTokenCredential :param index_name: The name of the index. Required. :type index_name: str - :keyword api_version: The API version to use for this operation. Known values are - "2025-11-01-preview" and None. Default value is "2025-11-01-preview". Note that overriding this - default value may result in unsupported behavior. + :keyword api_version: The API version to use for this operation. Known values are "2026-04-01" + and None. Default value is "2026-04-01". Note that overriding this default value may result in + unsupported behavior. 
:paramtype api_version: str """ diff --git a/sdk/search/azure-search-documents/azure/search/documents/aio/_configuration.py b/sdk/search/azure-search-documents/azure/search/documents/aio/_configuration.py index 5063e52eb2c9..55bb31ca21f5 100644 --- a/sdk/search/azure-search-documents/azure/search/documents/aio/_configuration.py +++ b/sdk/search/azure-search-documents/azure/search/documents/aio/_configuration.py @@ -23,7 +23,7 @@ class SearchClientConfiguration: # pylint: disable=too-many-instance-attributes Note that all parameters used to create this instance are saved as instance attributes. - :param endpoint: Service host. Required. + :param endpoint: The endpoint URL of the search service. Required. :type endpoint: str :param credential: Credential used to authenticate requests to the service. Is either a key credential type or a token credential type. Required. @@ -31,9 +31,9 @@ class SearchClientConfiguration: # pylint: disable=too-many-instance-attributes ~azure.core.credentials_async.AsyncTokenCredential :param index_name: The name of the index. Required. :type index_name: str - :keyword api_version: The API version to use for this operation. Known values are - "2025-11-01-preview" and None. Default value is "2025-11-01-preview". Note that overriding this - default value may result in unsupported behavior. + :keyword api_version: The API version to use for this operation. Known values are "2026-04-01" + and None. Default value is "2026-04-01". Note that overriding this default value may result in + unsupported behavior. 
:paramtype api_version: str """ @@ -44,7 +44,7 @@ def __init__( index_name: str, **kwargs: Any, ) -> None: - api_version: str = kwargs.pop("api_version", "2025-11-01-preview") + api_version: str = kwargs.pop("api_version", "2026-04-01") if endpoint is None: raise ValueError("Parameter 'endpoint' must not be None.") diff --git a/sdk/search/azure-search-documents/azure/search/documents/aio/_operations/_operations.py b/sdk/search/azure-search-documents/azure/search/documents/aio/_operations/_operations.py index 276109af4530..1be2fefa06f9 100644 --- a/sdk/search/azure-search-documents/azure/search/documents/aio/_operations/_operations.py +++ b/sdk/search/azure-search-documents/azure/search/documents/aio/_operations/_operations.py @@ -6,7 +6,7 @@ # Code generated by Microsoft (R) Python Code Generator. # Changes may cause incorrect behavior and will be lost if the code is regenerated. # -------------------------------------------------------------------------- -from collections.abc import MutableMapping +from collections.abc import MutableMapping # pylint: disable=import-error from io import IOBase import json from typing import Any, Callable, IO, Optional, TypeVar, Union, overload @@ -85,6 +85,7 @@ async def get_document_count(self, **kwargs: Any) -> int: } _request.url = self._client.format_url(_request.url, **path_format_arguments) + _decompress = kwargs.pop("decompress", True) _stream = kwargs.pop("stream", False) pipeline_response: PipelineResponse = await self._client._pipeline.run( # type: ignore # pylint: disable=protected-access _request, stream=_stream, **kwargs @@ -109,7 +110,7 @@ async def get_document_count(self, **kwargs: Any) -> int: response_headers["content-type"] = self._deserialize("str", response.headers.get("content-type")) if _stream: - deserialized = response.iter_bytes() + deserialized = response.iter_bytes() if _decompress else response.iter_raw() else: deserialized = _deserialize(int, response.text()) @@ -122,8 +123,6 @@ async def 
get_document_count(self, **kwargs: Any) -> int: async def _search_get( # pylint: disable=too-many-locals self, *, - query_source_authorization: Optional[str] = None, - enable_elevated_read: Optional[bool] = None, search_text: Optional[str] = None, include_total_result_count: Optional[bool] = None, facets: Optional[list[str]] = None, @@ -149,22 +148,11 @@ async def _search_get( # pylint: disable=too-many-locals answers: Optional[Union[str, _models2.QueryAnswerType]] = None, captions: Optional[Union[str, _models2.QueryCaptionType]] = None, semantic_query: Optional[str] = None, - query_rewrites: Optional[Union[str, _models2.QueryRewritesType]] = None, debug: Optional[Union[str, _models2.QueryDebugMode]] = None, - query_language: Optional[Union[str, _models2.QueryLanguage]] = None, - speller: Optional[Union[str, _models2.QuerySpellerType]] = None, - semantic_fields: Optional[list[str]] = None, **kwargs: Any ) -> _models2.SearchDocumentsResult: """Searches for documents in the index. - :keyword query_source_authorization: Token identifying the user for which the query is being - executed. This token is used to enforce security restrictions on documents. Default value is - None. - :paramtype query_source_authorization: str - :keyword enable_elevated_read: A value that enables elevated read that bypass document level - permission checks for the query operation. Default value is None. - :paramtype enable_elevated_read: bool :keyword search_text: A full-text search query expression; Use "*" or omit this parameter to match all documents. Default value is None. :paramtype search_text: str @@ -282,33 +270,10 @@ async def _search_get( # pylint: disable=too-many-locals is a need to use different queries between the base retrieval and ranking phase, and the L2 semantic phase. Default value is None. 
:paramtype semantic_query: str - :keyword query_rewrites: When QueryRewrites is set to ``generative``, the query terms are sent - to a generate model which will produce 10 (default) rewrites to help increase the recall of the - request. The requested count can be configured by appending the pipe character ``|`` followed - by the ``count-`` option, such as ``generative|count-3``. Defaults to - ``None``. This parameter is only valid if the query type is ``semantic``. Known values are: - "none" and "generative". Default value is None. - :paramtype query_rewrites: str or ~azure.search.documents.models.QueryRewritesType :keyword debug: Enables a debugging tool that can be used to further explore your search results. Known values are: "disabled", "semantic", "vector", "queryRewrites", "innerHits", and "all". Default value is None. :paramtype debug: str or ~azure.search.documents.models.QueryDebugMode - :keyword query_language: The language of the query. Known values are: "none", "en-us", "en-gb", - "en-in", "en-ca", "en-au", "fr-fr", "fr-ca", "de-de", "es-es", "es-mx", "zh-cn", "zh-tw", - "pt-br", "pt-pt", "it-it", "ja-jp", "ko-kr", "ru-ru", "cs-cz", "nl-be", "nl-nl", "hu-hu", - "pl-pl", "sv-se", "tr-tr", "hi-in", "ar-sa", "ar-eg", "ar-ma", "ar-kw", "ar-jo", "da-dk", - "no-no", "bg-bg", "hr-hr", "hr-ba", "ms-my", "ms-bn", "sl-sl", "ta-in", "vi-vn", "el-gr", - "ro-ro", "is-is", "id-id", "th-th", "lt-lt", "uk-ua", "lv-lv", "et-ee", "ca-es", "fi-fi", - "sr-ba", "sr-me", "sr-rs", "sk-sk", "nb-no", "hy-am", "bn-in", "eu-es", "gl-es", "gu-in", - "he-il", "ga-ie", "kn-in", "ml-in", "mr-in", "fa-ae", "pa-in", "te-in", and "ur-pk". Default - value is None. - :paramtype query_language: str or ~azure.search.documents.models.QueryLanguage - :keyword speller: Improve search recall by spell-correcting individual search query terms. - Known values are: "none" and "lexicon". Default value is None. 
- :paramtype speller: str or ~azure.search.documents.models.QuerySpellerType - :keyword semantic_fields: The list of field names used for semantic ranking. Default value is - None. - :paramtype semantic_fields: list[str] :return: SearchDocumentsResult. The SearchDocumentsResult is compatible with MutableMapping :rtype: ~azure.search.documents.models.SearchDocumentsResult :raises ~azure.core.exceptions.HttpResponseError: @@ -328,8 +293,6 @@ async def _search_get( # pylint: disable=too-many-locals _request = build_search_search_get_request( index_name=self._config.index_name, - query_source_authorization=query_source_authorization, - enable_elevated_read=enable_elevated_read, search_text=search_text, include_total_result_count=include_total_result_count, facets=facets, @@ -355,11 +318,7 @@ async def _search_get( # pylint: disable=too-many-locals answers=answers, captions=captions, semantic_query=semantic_query, - query_rewrites=query_rewrites, debug=debug, - query_language=query_language, - speller=speller, - semantic_fields=semantic_fields, api_version=self._config.api_version, headers=_headers, params=_params, @@ -369,6 +328,7 @@ async def _search_get( # pylint: disable=too-many-locals } _request.url = self._client.format_url(_request.url, **path_format_arguments) + _decompress = kwargs.pop("decompress", True) _stream = kwargs.pop("stream", False) pipeline_response: PipelineResponse = await self._client._pipeline.run( # type: ignore # pylint: disable=protected-access _request, stream=_stream, **kwargs @@ -390,7 +350,7 @@ async def _search_get( # pylint: disable=too-many-locals raise HttpResponseError(response=response, model=error) if _stream: - deserialized = response.iter_bytes() + deserialized = response.iter_bytes() if _decompress else response.iter_raw() else: deserialized = _deserialize(_models2.SearchDocumentsResult, response.json()) @@ -403,8 +363,6 @@ async def _search_get( # pylint: disable=too-many-locals async def _search_post( # pylint: 
disable=too-many-locals self, *, - query_source_authorization: Optional[str] = None, - enable_elevated_read: Optional[bool] = None, content_type: str = "application/json", include_total_count: Optional[bool] = None, facets: Optional[list[str]] = None, @@ -423,8 +381,6 @@ async def _search_post( # pylint: disable=too-many-locals search_text: Optional[str] = None, search_fields: Optional[list[str]] = None, search_mode: Optional[Union[str, _models2.SearchMode]] = None, - query_language: Optional[Union[str, _models2.QueryLanguage]] = None, - query_speller: Optional[Union[str, _models2.QuerySpellerType]] = None, select: Optional[list[str]] = None, skip: Optional[int] = None, top: Optional[int] = None, @@ -434,32 +390,17 @@ async def _search_post( # pylint: disable=too-many-locals semantic_query: Optional[str] = None, answers: Optional[Union[str, _models2.QueryAnswerType]] = None, captions: Optional[Union[str, _models2.QueryCaptionType]] = None, - query_rewrites: Optional[Union[str, _models2.QueryRewritesType]] = None, - semantic_fields: Optional[list[str]] = None, vector_queries: Optional[list[_models2.VectorQuery]] = None, vector_filter_mode: Optional[Union[str, _models2.VectorFilterMode]] = None, - hybrid_search: Optional[_models2.HybridSearch] = None, **kwargs: Any ) -> _models2.SearchDocumentsResult: ... @overload async def _search_post( - self, - body: JSON, - *, - query_source_authorization: Optional[str] = None, - enable_elevated_read: Optional[bool] = None, - content_type: str = "application/json", - **kwargs: Any + self, body: JSON, *, content_type: str = "application/json", **kwargs: Any ) -> _models2.SearchDocumentsResult: ... 
@overload async def _search_post( - self, - body: IO[bytes], - *, - query_source_authorization: Optional[str] = None, - enable_elevated_read: Optional[bool] = None, - content_type: str = "application/json", - **kwargs: Any + self, body: IO[bytes], *, content_type: str = "application/json", **kwargs: Any ) -> _models2.SearchDocumentsResult: ... @distributed_trace_async @@ -467,8 +408,6 @@ async def _search_post( # pylint: disable=too-many-locals self, body: Union[JSON, IO[bytes]] = _Unset, *, - query_source_authorization: Optional[str] = None, - enable_elevated_read: Optional[bool] = None, include_total_count: Optional[bool] = None, facets: Optional[list[str]] = None, filter: Optional[str] = None, @@ -486,8 +425,6 @@ async def _search_post( # pylint: disable=too-many-locals search_text: Optional[str] = None, search_fields: Optional[list[str]] = None, search_mode: Optional[Union[str, _models2.SearchMode]] = None, - query_language: Optional[Union[str, _models2.QueryLanguage]] = None, - query_speller: Optional[Union[str, _models2.QuerySpellerType]] = None, select: Optional[list[str]] = None, skip: Optional[int] = None, top: Optional[int] = None, @@ -497,24 +434,14 @@ async def _search_post( # pylint: disable=too-many-locals semantic_query: Optional[str] = None, answers: Optional[Union[str, _models2.QueryAnswerType]] = None, captions: Optional[Union[str, _models2.QueryCaptionType]] = None, - query_rewrites: Optional[Union[str, _models2.QueryRewritesType]] = None, - semantic_fields: Optional[list[str]] = None, vector_queries: Optional[list[_models2.VectorQuery]] = None, vector_filter_mode: Optional[Union[str, _models2.VectorFilterMode]] = None, - hybrid_search: Optional[_models2.HybridSearch] = None, **kwargs: Any ) -> _models2.SearchDocumentsResult: """Searches for documents in the index. :param body: Is either a JSON type or a IO[bytes] type. Required. 
:type body: JSON or IO[bytes] - :keyword query_source_authorization: Token identifying the user for which the query is being - executed. This token is used to enforce security restrictions on documents. Default value is - None. - :paramtype query_source_authorization: str - :keyword enable_elevated_read: A value that enables elevated read that bypass document level - permission checks for the query operation. Default value is None. - :paramtype enable_elevated_read: bool :keyword include_total_count: A value that specifies whether to fetch the total count of results. Default is false. Setting this value to true may have a performance impact. Note that the count returned is an approximation. Default value is None. @@ -588,19 +515,6 @@ async def _search_post( # pylint: disable=too-many-locals matched in order to count the document as a match. Known values are: "any" and "all". Default value is None. :paramtype search_mode: str or ~azure.search.documents.models.SearchMode - :keyword query_language: A value that specifies the language of the search query. Known values - are: "none", "en-us", "en-gb", "en-in", "en-ca", "en-au", "fr-fr", "fr-ca", "de-de", "es-es", - "es-mx", "zh-cn", "zh-tw", "pt-br", "pt-pt", "it-it", "ja-jp", "ko-kr", "ru-ru", "cs-cz", - "nl-be", "nl-nl", "hu-hu", "pl-pl", "sv-se", "tr-tr", "hi-in", "ar-sa", "ar-eg", "ar-ma", - "ar-kw", "ar-jo", "da-dk", "no-no", "bg-bg", "hr-hr", "hr-ba", "ms-my", "ms-bn", "sl-sl", - "ta-in", "vi-vn", "el-gr", "ro-ro", "is-is", "id-id", "th-th", "lt-lt", "uk-ua", "lv-lv", - "et-ee", "ca-es", "fi-fi", "sr-ba", "sr-me", "sr-rs", "sk-sk", "nb-no", "hy-am", "bn-in", - "eu-es", "gl-es", "gu-in", "he-il", "ga-ie", "kn-in", "ml-in", "mr-in", "fa-ae", "pa-in", - "te-in", and "ur-pk". Default value is None. - :paramtype query_language: str or ~azure.search.documents.models.QueryLanguage - :keyword query_speller: A value that specifies the type of the speller to use to spell-correct - individual search query terms. 
Known values are: "none" and "lexicon". Default value is None. - :paramtype query_speller: str or ~azure.search.documents.models.QuerySpellerType :keyword select: The comma-separated list of fields to retrieve. If unspecified, all fields marked as retrievable in the schema are included. Default value is None. :paramtype select: list[str] @@ -636,12 +550,6 @@ async def _search_post( # pylint: disable=too-many-locals :keyword captions: A value that specifies whether captions should be returned as part of the search response. Known values are: "none" and "extractive". Default value is None. :paramtype captions: str or ~azure.search.documents.models.QueryCaptionType - :keyword query_rewrites: A value that specifies whether query rewrites should be generated to - augment the search query. Known values are: "none" and "generative". Default value is None. - :paramtype query_rewrites: str or ~azure.search.documents.models.QueryRewritesType - :keyword semantic_fields: The comma-separated list of field names used for semantic ranking. - Default value is None. - :paramtype semantic_fields: list[str] :keyword vector_queries: The query parameters for vector and hybrid search queries. Default value is None. :paramtype vector_queries: list[~azure.search.documents.models.VectorQuery] @@ -649,9 +557,6 @@ async def _search_post( # pylint: disable=too-many-locals vector search is performed. Default is 'preFilter' for new indexes. Known values are: "postFilter", "preFilter", and "strictPostFilter". Default value is None. :paramtype vector_filter_mode: str or ~azure.search.documents.models.VectorFilterMode - :keyword hybrid_search: The query parameters to configure hybrid search behaviors. Default - value is None. - :paramtype hybrid_search: ~azure.search.documents.models.HybridSearch :return: SearchDocumentsResult. 
The SearchDocumentsResult is compatible with MutableMapping :rtype: ~azure.search.documents.models.SearchDocumentsResult :raises ~azure.core.exceptions.HttpResponseError: @@ -681,11 +586,8 @@ async def _search_post( # pylint: disable=too-many-locals "highlight": highlight_fields, "highlightPostTag": highlight_post_tag, "highlightPreTag": highlight_pre_tag, - "hybridSearch": hybrid_search, "minimumCoverage": minimum_coverage, "orderby": order_by, - "queryLanguage": query_language, - "queryRewrites": query_rewrites, "queryType": query_type, "scoringParameters": scoring_parameters, "scoringProfile": scoring_profile, @@ -696,12 +598,10 @@ async def _search_post( # pylint: disable=too-many-locals "select": select, "semanticConfiguration": semantic_configuration_name, "semanticErrorHandling": semantic_error_handling, - "semanticFields": semantic_fields, "semanticMaxWaitInMilliseconds": semantic_max_wait_in_milliseconds, "semanticQuery": semantic_query, "sessionId": session_id, "skip": skip, - "speller": query_speller, "top": top, "vectorFilterMode": vector_filter_mode, "vectorQueries": vector_queries, @@ -716,8 +616,6 @@ async def _search_post( # pylint: disable=too-many-locals _request = build_search_search_post_request( index_name=self._config.index_name, - query_source_authorization=query_source_authorization, - enable_elevated_read=enable_elevated_read, content_type=content_type, api_version=self._config.api_version, content=_content, @@ -729,6 +627,7 @@ async def _search_post( # pylint: disable=too-many-locals } _request.url = self._client.format_url(_request.url, **path_format_arguments) + _decompress = kwargs.pop("decompress", True) _stream = kwargs.pop("stream", False) pipeline_response: PipelineResponse = await self._client._pipeline.run( # type: ignore # pylint: disable=protected-access _request, stream=_stream, **kwargs @@ -750,7 +649,7 @@ async def _search_post( # pylint: disable=too-many-locals raise HttpResponseError(response=response, model=error) if 
_stream: - deserialized = response.iter_bytes() + deserialized = response.iter_bytes() if _decompress else response.iter_raw() else: deserialized = _deserialize(_models2.SearchDocumentsResult, response.json()) @@ -761,25 +660,12 @@ async def _search_post( # pylint: disable=too-many-locals @distributed_trace_async async def get_document( - self, - key: str, - *, - query_source_authorization: Optional[str] = None, - enable_elevated_read: Optional[bool] = None, - selected_fields: Optional[list[str]] = None, - **kwargs: Any + self, key: str, *, selected_fields: Optional[list[str]] = None, **kwargs: Any ) -> _models2.LookupDocument: """Retrieves a document from the index. :param key: The key of the document to retrieve. Required. :type key: str - :keyword query_source_authorization: Token identifying the user for which the query is being - executed. This token is used to enforce security restrictions on documents. Default value is - None. - :paramtype query_source_authorization: str - :keyword enable_elevated_read: A value that enables elevated read that bypass document level - permission checks for the query operation. Default value is None. - :paramtype enable_elevated_read: bool :keyword selected_fields: List of field names to retrieve for the document; Any field not retrieved will be missing from the returned document. Default value is None. 
:paramtype selected_fields: list[str] @@ -803,8 +689,6 @@ async def get_document( _request = build_search_get_document_request( key=key, index_name=self._config.index_name, - query_source_authorization=query_source_authorization, - enable_elevated_read=enable_elevated_read, selected_fields=selected_fields, api_version=self._config.api_version, headers=_headers, @@ -815,6 +699,7 @@ async def get_document( } _request.url = self._client.format_url(_request.url, **path_format_arguments) + _decompress = kwargs.pop("decompress", True) _stream = kwargs.pop("stream", False) pipeline_response: PipelineResponse = await self._client._pipeline.run( # type: ignore # pylint: disable=protected-access _request, stream=_stream, **kwargs @@ -836,7 +721,7 @@ async def get_document( raise HttpResponseError(response=response, model=error) if _stream: - deserialized = response.iter_bytes() + deserialized = response.iter_bytes() if _decompress else response.iter_raw() else: deserialized = _deserialize(_models2.LookupDocument, response.json()) @@ -947,6 +832,7 @@ async def _suggest_get( } _request.url = self._client.format_url(_request.url, **path_format_arguments) + _decompress = kwargs.pop("decompress", True) _stream = kwargs.pop("stream", False) pipeline_response: PipelineResponse = await self._client._pipeline.run( # type: ignore # pylint: disable=protected-access _request, stream=_stream, **kwargs @@ -968,7 +854,7 @@ async def _suggest_get( raise HttpResponseError(response=response, model=error) if _stream: - deserialized = response.iter_bytes() + deserialized = response.iter_bytes() if _decompress else response.iter_raw() else: deserialized = _deserialize( _models2._models.SuggestDocumentsResult, response.json() # pylint: disable=protected-access @@ -1129,6 +1015,7 @@ async def _suggest_post( # pylint: disable=too-many-locals } _request.url = self._client.format_url(_request.url, **path_format_arguments) + _decompress = kwargs.pop("decompress", True) _stream = kwargs.pop("stream", 
False) pipeline_response: PipelineResponse = await self._client._pipeline.run( # type: ignore # pylint: disable=protected-access _request, stream=_stream, **kwargs @@ -1150,7 +1037,7 @@ async def _suggest_post( # pylint: disable=too-many-locals raise HttpResponseError(response=response, model=error) if _stream: - deserialized = response.iter_bytes() + deserialized = response.iter_bytes() if _decompress else response.iter_raw() else: deserialized = _deserialize( _models2._models.SuggestDocumentsResult, response.json() # pylint: disable=protected-access @@ -1221,6 +1108,7 @@ async def _index( } _request.url = self._client.format_url(_request.url, **path_format_arguments) + _decompress = kwargs.pop("decompress", True) _stream = kwargs.pop("stream", False) pipeline_response: PipelineResponse = await self._client._pipeline.run( # type: ignore # pylint: disable=protected-access _request, stream=_stream, **kwargs @@ -1242,7 +1130,7 @@ async def _index( raise HttpResponseError(response=response, model=error) if _stream: - deserialized = response.iter_bytes() + deserialized = response.iter_bytes() if _decompress else response.iter_raw() else: deserialized = _deserialize( _models2._models.IndexDocumentsResult, response.json() # pylint: disable=protected-access @@ -1259,7 +1147,7 @@ async def _autocomplete_get( *, search_text: str, suggester_name: str, - autocomplete_mode: Optional[Union[str, _models2.AutocompleteMode]] = None, + autocomplete_mode: Optional[Union[str, _models2._enums.AutocompleteMode]] = None, filter: Optional[str] = None, use_fuzzy_matching: Optional[bool] = None, highlight_post_tag: Optional[str] = None, @@ -1345,6 +1233,7 @@ async def _autocomplete_get( } _request.url = self._client.format_url(_request.url, **path_format_arguments) + _decompress = kwargs.pop("decompress", True) _stream = kwargs.pop("stream", False) pipeline_response: PipelineResponse = await self._client._pipeline.run( # type: ignore # pylint: disable=protected-access _request, 
stream=_stream, **kwargs @@ -1366,7 +1255,7 @@ async def _autocomplete_get( raise HttpResponseError(response=response, model=error) if _stream: - deserialized = response.iter_bytes() + deserialized = response.iter_bytes() if _decompress else response.iter_raw() else: deserialized = _deserialize( _models2._models.AutocompleteResult, response.json() # pylint: disable=protected-access @@ -1384,7 +1273,7 @@ async def _autocomplete_post( search_text: str, suggester_name: str, content_type: str = "application/json", - autocomplete_mode: Optional[Union[str, _models2.AutocompleteMode]] = None, + autocomplete_mode: Optional[Union[str, _models2._enums.AutocompleteMode]] = None, filter: Optional[str] = None, use_fuzzy_matching: Optional[bool] = None, highlight_post_tag: Optional[str] = None, @@ -1410,7 +1299,7 @@ async def _autocomplete_post( # pylint: disable=too-many-locals *, search_text: str = _Unset, suggester_name: str = _Unset, - autocomplete_mode: Optional[Union[str, _models2.AutocompleteMode]] = None, + autocomplete_mode: Optional[Union[str, _models2._enums.AutocompleteMode]] = None, filter: Optional[str] = None, use_fuzzy_matching: Optional[bool] = None, highlight_post_tag: Optional[str] = None, @@ -1517,6 +1406,7 @@ async def _autocomplete_post( # pylint: disable=too-many-locals } _request.url = self._client.format_url(_request.url, **path_format_arguments) + _decompress = kwargs.pop("decompress", True) _stream = kwargs.pop("stream", False) pipeline_response: PipelineResponse = await self._client._pipeline.run( # type: ignore # pylint: disable=protected-access _request, stream=_stream, **kwargs @@ -1538,7 +1428,7 @@ async def _autocomplete_post( # pylint: disable=too-many-locals raise HttpResponseError(response=response, model=error) if _stream: - deserialized = response.iter_bytes() + deserialized = response.iter_bytes() if _decompress else response.iter_raw() else: deserialized = _deserialize( _models2._models.AutocompleteResult, response.json() # pylint: 
disable=protected-access diff --git a/sdk/search/azure-search-documents/azure/search/documents/aio/_operations/_patch.py b/sdk/search/azure-search-documents/azure/search/documents/aio/_operations/_patch.py index 64c5eaf33932..e4352e3e6121 100644 --- a/sdk/search/azure-search-documents/azure/search/documents/aio/_operations/_patch.py +++ b/sdk/search/azure-search-documents/azure/search/documents/aio/_operations/_patch.py @@ -54,7 +54,7 @@ def __init__(self, client, initial_request: _models.SearchRequest, kwargs, conti self._initial_request = initial_request self._kwargs = kwargs self._facets: Optional[Dict[str, List[Dict[str, Any]]]] = None - self._api_version = kwargs.get("api_version", "2025-11-01-preview") + self._api_version = kwargs.get("api_version", "2026-04-01") async def _get_next_cb(self, continuation_token): if continuation_token is None: @@ -100,12 +100,6 @@ async def get_answers(self) -> Optional[List[_models.QueryAnswerResult]]: response = cast(_models.SearchDocumentsResult, self._response) return response.answers - @_ensure_response - async def get_debug_info(self) -> Optional[_models.DebugInfo]: - self.continuation_token = None - response = cast(_models.SearchDocumentsResult, self._response) - return response.debug_info - class AsyncSearchItemPaged(AsyncItemPaged[ReturnType]): """An async pageable list of search results.""" @@ -168,14 +162,6 @@ async def get_answers(self) -> Optional[List[_models.QueryAnswerResult]]: """ return cast(Optional[List[_models.QueryAnswerResult]], await self._first_iterator_instance().get_answers()) - async def get_debug_info(self) -> _models.DebugInfo: - """Return the debug information for the query. 
- - :return: the debug information for the query - :rtype: ~azure.search.documents.models.DebugInfo - """ - return cast(_models.DebugInfo, await self._first_iterator_instance().get_debug_info()) - class _SearchClientOperationsMixin(_SearchClientOperationsMixinGenerated): """Async SearchClient operations mixin customizations.""" @@ -350,14 +336,11 @@ async def search( semantic_query: Optional[str] = None, search_fields: Optional[List[str]] = None, search_mode: Optional[Union[str, _models.SearchMode]] = None, - query_language: Optional[Union[str, _models.QueryLanguage]] = None, - query_speller: Optional[Union[str, _models.QuerySpellerType]] = None, query_answer: Optional[Union[str, _models.QueryAnswerType]] = None, query_answer_count: Optional[int] = None, query_answer_threshold: Optional[float] = None, query_caption: Optional[Union[str, _models.QueryCaptionType]] = None, query_caption_highlight_enabled: Optional[bool] = None, - semantic_fields: Optional[List[str]] = None, semantic_configuration_name: Optional[str] = None, select: Optional[List[str]] = None, skip: Optional[int] = None, @@ -368,12 +351,7 @@ async def search( vector_filter_mode: Optional[Union[str, _models.VectorFilterMode]] = None, semantic_error_mode: Optional[Union[str, _models.SemanticErrorMode]] = None, semantic_max_wait_in_milliseconds: Optional[int] = None, - query_rewrites: Optional[Union[str, _models.QueryRewritesType]] = None, - query_rewrites_count: Optional[int] = None, debug: Optional[Union[str, _models.QueryDebugMode]] = None, - hybrid_search: Optional[_models.HybridSearch] = None, - query_source_authorization: Optional[str] = None, - enable_elevated_read: Optional[bool] = None, **kwargs: Any, ) -> AsyncSearchItemPaged[Dict]: # pylint:disable=too-many-locals @@ -427,18 +405,6 @@ async def search( :keyword search_mode: A value that specifies whether any or all of the search terms must be matched in order to count the document as a match. Possible values include: 'any', 'all'. 
:paramtype search_mode: str or ~azure.search.documents.models.SearchMode - :keyword query_language: The language of the search query. Possible values include: "none", "en-us", - "en-gb", "en-in", "en-ca", "en-au", "fr-fr", "fr-ca", "de-de", "es-es", "es-mx", "zh-cn", - "zh-tw", "pt-br", "pt-pt", "it-it", "ja-jp", "ko-kr", "ru-ru", "cs-cz", "nl-be", "nl-nl", - "hu-hu", "pl-pl", "sv-se", "tr-tr", "hi-in", "ar-sa", "ar-eg", "ar-ma", "ar-kw", "ar-jo", - "da-dk", "no-no", "bg-bg", "hr-hr", "hr-ba", "ms-my", "ms-bn", "sl-sl", "ta-in", "vi-vn", - "el-gr", "ro-ro", "is-is", "id-id", "th-th", "lt-lt", "uk-ua", "lv-lv", "et-ee", "ca-es", - "fi-fi", "sr-ba", "sr-me", "sr-rs", "sk-sk", "nb-no", "hy-am", "bn-in", "eu-es", "gl-es", - "gu-in", "he-il", "ga-ie", "kn-in", "ml-in", "mr-in", "fa-ae", "pa-in", "te-in", "ur-pk". - :paramtype query_language: str or ~azure.search.documents.models.QueryLanguage - :keyword query_speller: A value that specified the type of the speller to use to spell-correct - individual search query terms. Possible values include: "none", "lexicon". - :paramtype query_speller: str or ~azure.search.documents.models.QuerySpellerType :keyword query_answer: This parameter is only valid if the query type is 'semantic'. If set, the query returns answers extracted from key passages in the highest ranked documents. Possible values include: "none", "extractive". @@ -454,8 +420,6 @@ async def search( :keyword bool query_caption_highlight_enabled: This parameter is only valid if the query type is 'semantic' when query caption is set to 'extractive'. Determines whether highlighting is enabled. Defaults to 'true'. - :keyword semantic_fields: The comma-separated list of field names used for semantic ranking. - :paramtype semantic_fields: list[str] :keyword semantic_configuration_name: The name of the semantic configuration that will be used when processing documents for queries of type semantic. 
:paramtype semantic_configuration_name: str @@ -487,15 +451,6 @@ async def search( :paramtype semantic_error_mode: str or ~azure.search.documents.models.SemanticErrorMode :keyword int semantic_max_wait_in_milliseconds: Allows the user to set an upper bound on the amount of time it takes for semantic enrichment to finish processing before the request fails. - :keyword query_rewrites: When QueryRewrites is set to ``generative``\\ , the query terms are sent - to a generate model which will produce 10 (default) rewrites to help increase the recall of the - request. The requested count can be configured by appending the pipe character ``|`` followed - by the ``count-`` option, such as ``generative|count-3``. Defaults to - ``None``. This parameter is only valid if the query type is ``semantic``. Known values are: - "none" and "generative". - :paramtype query_rewrites: str or ~azure.search.documents.models.QueryRewritesType - :keyword int query_rewrites_count: This parameter is only valid if the query rewrites type is 'generative'. - Configures the number of rewrites returned. Default count is 10. :keyword debug: Enables a debugging tool that can be used to further explore your Semantic search results. Known values are: "disabled", "speller", "semantic", and "all". :paramtype debug: str or ~azure.search.documents.models.QueryDebugMode @@ -504,15 +459,6 @@ async def search( :keyword vector_filter_mode: Determines whether or not filters are applied before or after the vector search is performed. Default is 'preFilter'. Known values are: "postFilter" and "preFilter". :paramtype vector_filter_mode: str or ~azure.search.documents.models.VectorFilterMode - :keyword hybrid_search: The query parameters to configure hybrid search behaviors. - :paramtype hybrid_search: ~azure.search.documents.models.HybridSearch - :keyword query_source_authorization: Token identifying the user for which the query is being - executed. This token is used to enforce security restrictions on documents. 
Default value is - None. - :paramtype query_source_authorization: str - :keyword enable_elevated_read: A value that enables elevated read that bypass document level - permission checks for the query operation. Default value is None. - :paramtype enable_elevated_read: bool :return: A list of documents (dicts) matching the specified search criteria. :return: List of search results. :rtype: AsyncSearchItemPaged[dict] @@ -561,14 +507,11 @@ async def search( semantic_query=semantic_query, search_fields=search_fields, search_mode=search_mode, - query_language=query_language, - query_speller=query_speller, query_answer=query_answer, query_answer_count=query_answer_count, query_answer_threshold=query_answer_threshold, query_caption=query_caption, query_caption_highlight_enabled=query_caption_highlight_enabled, - semantic_fields=semantic_fields, semantic_configuration_name=semantic_configuration_name, select=select, skip=skip, @@ -579,18 +522,11 @@ async def search( vector_filter_mode=vector_filter_mode, semantic_error_mode=semantic_error_mode, semantic_max_wait_in_milliseconds=semantic_max_wait_in_milliseconds, - query_rewrites=query_rewrites, - query_rewrites_count=query_rewrites_count, debug=debug, - hybrid_search=hybrid_search, ) # Create kwargs for the search_post call search_kwargs = dict(kwargs) - if query_source_authorization is not None: - search_kwargs["query_source_authorization"] = query_source_authorization - if enable_elevated_read is not None: - search_kwargs["enable_elevated_read"] = enable_elevated_read return AsyncSearchItemPaged(self, search_request, search_kwargs, page_iterator_class=AsyncSearchPageIterator) diff --git a/sdk/search/azure-search-documents/azure/search/documents/aio/_patch.py b/sdk/search/azure-search-documents/azure/search/documents/aio/_patch.py index 3eed18136470..1947a709dd72 100644 --- a/sdk/search/azure-search-documents/azure/search/documents/aio/_patch.py +++ b/sdk/search/azure-search-documents/azure/search/documents/aio/_patch.py 
@@ -36,7 +36,7 @@ class SearchClient(_SearchClient): :param index_name: The name of the index. Required. :type index_name: str :keyword api_version: The API version to use for this operation. Default value is - "2025-11-01-preview". Note that overriding this default value may result in unsupported + "2026-04-01". Note that overriding this default value may result in unsupported behavior. :paramtype api_version: str """ diff --git a/sdk/search/azure-search-documents/azure/search/documents/indexes/_client.py b/sdk/search/azure-search-documents/azure/search/documents/indexes/_client.py index 4a3b1a14824f..711ac33e5401 100644 --- a/sdk/search/azure-search-documents/azure/search/documents/indexes/_client.py +++ b/sdk/search/azure-search-documents/azure/search/documents/indexes/_client.py @@ -26,15 +26,15 @@ class SearchIndexClient(_SearchIndexClientOperationsMixin): """SearchIndexClient. - :param endpoint: Service host. Required. + :param endpoint: The endpoint URL of the search service. Required. :type endpoint: str :param credential: Credential used to authenticate requests to the service. Is either a key credential type or a token credential type. Required. :type credential: ~azure.core.credentials.AzureKeyCredential or ~azure.core.credentials.TokenCredential - :keyword api_version: The API version to use for this operation. Known values are - "2025-11-01-preview" and None. Default value is "2025-11-01-preview". Note that overriding this - default value may result in unsupported behavior. + :keyword api_version: The API version to use for this operation. Known values are "2026-04-01" + and None. Default value is "2026-04-01". Note that overriding this default value may result in + unsupported behavior. :paramtype api_version: str """ @@ -105,15 +105,15 @@ def __exit__(self, *exc_details: Any) -> None: class SearchIndexerClient(_SearchIndexerClientOperationsMixin): """SearchIndexerClient. - :param endpoint: Service host. Required. 
+ :param endpoint: The endpoint URL of the search service. Required. :type endpoint: str :param credential: Credential used to authenticate requests to the service. Is either a key credential type or a token credential type. Required. :type credential: ~azure.core.credentials.AzureKeyCredential or ~azure.core.credentials.TokenCredential - :keyword api_version: The API version to use for this operation. Known values are - "2025-11-01-preview" and None. Default value is "2025-11-01-preview". Note that overriding this - default value may result in unsupported behavior. + :keyword api_version: The API version to use for this operation. Known values are "2026-04-01" + and None. Default value is "2026-04-01". Note that overriding this default value may result in + unsupported behavior. :paramtype api_version: str """ diff --git a/sdk/search/azure-search-documents/azure/search/documents/indexes/_configuration.py b/sdk/search/azure-search-documents/azure/search/documents/indexes/_configuration.py index ccbce6e25376..d9f83e1e0304 100644 --- a/sdk/search/azure-search-documents/azure/search/documents/indexes/_configuration.py +++ b/sdk/search/azure-search-documents/azure/search/documents/indexes/_configuration.py @@ -23,20 +23,20 @@ class SearchIndexClientConfiguration: # pylint: disable=too-many-instance-attri Note that all parameters used to create this instance are saved as instance attributes. - :param endpoint: Service host. Required. + :param endpoint: The endpoint URL of the search service. Required. :type endpoint: str :param credential: Credential used to authenticate requests to the service. Is either a key credential type or a token credential type. Required. :type credential: ~azure.core.credentials.AzureKeyCredential or ~azure.core.credentials.TokenCredential - :keyword api_version: The API version to use for this operation. Known values are - "2025-11-01-preview" and None. Default value is "2025-11-01-preview". 
Note that overriding this - default value may result in unsupported behavior. + :keyword api_version: The API version to use for this operation. Known values are "2026-04-01" + and None. Default value is "2026-04-01". Note that overriding this default value may result in + unsupported behavior. :paramtype api_version: str """ def __init__(self, endpoint: str, credential: Union[AzureKeyCredential, "TokenCredential"], **kwargs: Any) -> None: - api_version: str = kwargs.pop("api_version", "2025-11-01-preview") + api_version: str = kwargs.pop("api_version", "2026-04-01") if endpoint is None: raise ValueError("Parameter 'endpoint' must not be None.") @@ -78,20 +78,20 @@ class SearchIndexerClientConfiguration: # pylint: disable=too-many-instance-att Note that all parameters used to create this instance are saved as instance attributes. - :param endpoint: Service host. Required. + :param endpoint: The endpoint URL of the search service. Required. :type endpoint: str :param credential: Credential used to authenticate requests to the service. Is either a key credential type or a token credential type. Required. :type credential: ~azure.core.credentials.AzureKeyCredential or ~azure.core.credentials.TokenCredential - :keyword api_version: The API version to use for this operation. Known values are - "2025-11-01-preview" and None. Default value is "2025-11-01-preview". Note that overriding this - default value may result in unsupported behavior. + :keyword api_version: The API version to use for this operation. Known values are "2026-04-01" + and None. Default value is "2026-04-01". Note that overriding this default value may result in + unsupported behavior. 
:paramtype api_version: str """ def __init__(self, endpoint: str, credential: Union[AzureKeyCredential, "TokenCredential"], **kwargs: Any) -> None: - api_version: str = kwargs.pop("api_version", "2025-11-01-preview") + api_version: str = kwargs.pop("api_version", "2026-04-01") if endpoint is None: raise ValueError("Parameter 'endpoint' must not be None.") diff --git a/sdk/search/azure-search-documents/azure/search/documents/indexes/_operations/_operations.py b/sdk/search/azure-search-documents/azure/search/documents/indexes/_operations/_operations.py index 379ceb21d9d2..6d991dd232df 100644 --- a/sdk/search/azure-search-documents/azure/search/documents/indexes/_operations/_operations.py +++ b/sdk/search/azure-search-documents/azure/search/documents/indexes/_operations/_operations.py @@ -6,7 +6,7 @@ # Code generated by Microsoft (R) Python Code Generator. # Changes may cause incorrect behavior and will be lost if the code is regenerated. # -------------------------------------------------------------------------- -from collections.abc import MutableMapping +from collections.abc import MutableMapping # pylint: disable=import-error from io import IOBase import json from typing import Any, Callable, IO, Literal, Optional, TypeVar, Union, overload @@ -54,7 +54,7 @@ def build_search_index_create_or_update_synonym_map_request( # pylint: disable= prefer: Literal["return=representation"] = kwargs.pop("prefer", _headers.pop("Prefer", "return=representation")) content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None)) - api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2025-11-01-preview")) + api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2026-04-01")) accept = _headers.pop("Accept", "application/json;odata.metadata=minimal") # Construct URL @@ -69,7 +69,8 @@ def build_search_index_create_or_update_synonym_map_request( # pylint: disable= _params["api-version"] = _SERIALIZER.query("api_version", 
api_version, "str") # Construct headers - _headers["Accept"] = _SERIALIZER.header("accept", accept, "str") + if accept is not None: + _headers["Accept"] = _SERIALIZER.header("accept", accept, "str") _headers["Prefer"] = _SERIALIZER.header("prefer", prefer, "str") if content_type is not None: _headers["Content-Type"] = _SERIALIZER.header("content_type", content_type, "str") @@ -89,7 +90,7 @@ def build_search_index_delete_synonym_map_request( # pylint: disable=name-too-l _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) - api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2025-11-01-preview")) + api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2026-04-01")) accept = _headers.pop("Accept", "application/json;odata.metadata=minimal") # Construct URL @@ -104,7 +105,8 @@ def build_search_index_delete_synonym_map_request( # pylint: disable=name-too-l _params["api-version"] = _SERIALIZER.query("api_version", api_version, "str") # Construct headers - _headers["Accept"] = _SERIALIZER.header("accept", accept, "str") + if accept is not None: + _headers["Accept"] = _SERIALIZER.header("accept", accept, "str") if_match = prep_if_match(etag, match_condition) if if_match is not None: _headers["If-Match"] = _SERIALIZER.header("if_match", if_match, "str") @@ -121,7 +123,7 @@ def build_search_index_get_synonym_map_request( # pylint: disable=name-too-long _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) - api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2025-11-01-preview")) + api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2026-04-01")) accept = _headers.pop("Accept", "application/json;odata.metadata=minimal") # Construct URL @@ -136,7 +138,8 @@ def build_search_index_get_synonym_map_request( # pylint: disable=name-too-long 
_params["api-version"] = _SERIALIZER.query("api_version", api_version, "str") # Construct headers - _headers["Accept"] = _SERIALIZER.header("accept", accept, "str") + if accept is not None: + _headers["Accept"] = _SERIALIZER.header("accept", accept, "str") return HttpRequest(method="GET", url=_url, params=_params, headers=_headers, **kwargs) @@ -147,7 +150,7 @@ def build_search_index_get_synonym_maps_request( # pylint: disable=name-too-lon _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) - api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2025-11-01-preview")) + api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2026-04-01")) accept = _headers.pop("Accept", "application/json;odata.metadata=minimal") # Construct URL @@ -159,7 +162,8 @@ def build_search_index_get_synonym_maps_request( # pylint: disable=name-too-lon _params["$select"] = _SERIALIZER.query("select", select, "[str]", div=",") # Construct headers - _headers["Accept"] = _SERIALIZER.header("accept", accept, "str") + if accept is not None: + _headers["Accept"] = _SERIALIZER.header("accept", accept, "str") return HttpRequest(method="GET", url=_url, params=_params, headers=_headers, **kwargs) @@ -169,7 +173,7 @@ def build_search_index_create_synonym_map_request(**kwargs: Any) -> HttpRequest: _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None)) - api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2025-11-01-preview")) + api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2026-04-01")) accept = _headers.pop("Accept", "application/json;odata.metadata=minimal") # Construct URL @@ -179,7 +183,8 @@ def build_search_index_create_synonym_map_request(**kwargs: Any) -> HttpRequest: _params["api-version"] = _SERIALIZER.query("api_version", api_version, 
"str") # Construct headers - _headers["Accept"] = _SERIALIZER.header("accept", accept, "str") + if accept is not None: + _headers["Accept"] = _SERIALIZER.header("accept", accept, "str") if content_type is not None: _headers["Content-Type"] = _SERIALIZER.header("content_type", content_type, "str") @@ -199,7 +204,7 @@ def build_search_index_create_or_update_index_request( # pylint: disable=name-t prefer: Literal["return=representation"] = kwargs.pop("prefer", _headers.pop("Prefer", "return=representation")) content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None)) - api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2025-11-01-preview")) + api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2026-04-01")) accept = _headers.pop("Accept", "application/json;odata.metadata=minimal") # Construct URL @@ -216,7 +221,8 @@ def build_search_index_create_or_update_index_request( # pylint: disable=name-t _params["allowIndexDowntime"] = _SERIALIZER.query("allow_index_downtime", allow_index_downtime, "bool") # Construct headers - _headers["Accept"] = _SERIALIZER.header("accept", accept, "str") + if accept is not None: + _headers["Accept"] = _SERIALIZER.header("accept", accept, "str") _headers["Prefer"] = _SERIALIZER.header("prefer", prefer, "str") if content_type is not None: _headers["Content-Type"] = _SERIALIZER.header("content_type", content_type, "str") @@ -236,7 +242,7 @@ def build_search_index_delete_index_request( _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) - api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2025-11-01-preview")) + api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2026-04-01")) accept = _headers.pop("Accept", "application/json;odata.metadata=minimal") # Construct URL @@ -251,7 +257,8 @@ def build_search_index_delete_index_request( 
_params["api-version"] = _SERIALIZER.query("api_version", api_version, "str") # Construct headers - _headers["Accept"] = _SERIALIZER.header("accept", accept, "str") + if accept is not None: + _headers["Accept"] = _SERIALIZER.header("accept", accept, "str") if_match = prep_if_match(etag, match_condition) if if_match is not None: _headers["If-Match"] = _SERIALIZER.header("if_match", if_match, "str") @@ -266,7 +273,7 @@ def build_search_index_get_index_request(name: str, **kwargs: Any) -> HttpReques _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) - api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2025-11-01-preview")) + api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2026-04-01")) accept = _headers.pop("Accept", "application/json;odata.metadata=minimal") # Construct URL @@ -281,7 +288,8 @@ def build_search_index_get_index_request(name: str, **kwargs: Any) -> HttpReques _params["api-version"] = _SERIALIZER.query("api_version", api_version, "str") # Construct headers - _headers["Accept"] = _SERIALIZER.header("accept", accept, "str") + if accept is not None: + _headers["Accept"] = _SERIALIZER.header("accept", accept, "str") return HttpRequest(method="GET", url=_url, params=_params, headers=_headers, **kwargs) @@ -290,7 +298,7 @@ def build_search_index_list_indexes_request(**kwargs: Any) -> HttpRequest: _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) - api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2025-11-01-preview")) + api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2026-04-01")) accept = _headers.pop("Accept", "application/json;odata.metadata=minimal") # Construct URL @@ -300,7 +308,8 @@ def build_search_index_list_indexes_request(**kwargs: Any) -> HttpRequest: _params["api-version"] = 
_SERIALIZER.query("api_version", api_version, "str") # Construct headers - _headers["Accept"] = _SERIALIZER.header("accept", accept, "str") + if accept is not None: + _headers["Accept"] = _SERIALIZER.header("accept", accept, "str") return HttpRequest(method="GET", url=_url, params=_params, headers=_headers, **kwargs) @@ -311,7 +320,7 @@ def build_search_index_list_indexes_with_selected_properties_request( # pylint: _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) - api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2025-11-01-preview")) + api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2026-04-01")) accept = _headers.pop("Accept", "application/json;odata.metadata=minimal") # Construct URL @@ -323,7 +332,8 @@ def build_search_index_list_indexes_with_selected_properties_request( # pylint: _params["$select"] = _SERIALIZER.query("select", select, "[str]", div=",") # Construct headers - _headers["Accept"] = _SERIALIZER.header("accept", accept, "str") + if accept is not None: + _headers["Accept"] = _SERIALIZER.header("accept", accept, "str") return HttpRequest(method="GET", url=_url, params=_params, headers=_headers, **kwargs) @@ -333,7 +343,7 @@ def build_search_index_create_index_request(**kwargs: Any) -> HttpRequest: _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None)) - api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2025-11-01-preview")) + api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2026-04-01")) accept = _headers.pop("Accept", "application/json;odata.metadata=minimal") # Construct URL @@ -343,7 +353,8 @@ def build_search_index_create_index_request(**kwargs: Any) -> HttpRequest: _params["api-version"] = _SERIALIZER.query("api_version", api_version, "str") # Construct headers - 
_headers["Accept"] = _SERIALIZER.header("accept", accept, "str") + if accept is not None: + _headers["Accept"] = _SERIALIZER.header("accept", accept, "str") if content_type is not None: _headers["Content-Type"] = _SERIALIZER.header("content_type", content_type, "str") @@ -356,7 +367,7 @@ def build_search_index_get_index_statistics_request( # pylint: disable=name-too _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) - api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2025-11-01-preview")) + api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2026-04-01")) accept = _headers.pop("Accept", "application/json;odata.metadata=minimal") # Construct URL @@ -371,7 +382,8 @@ def build_search_index_get_index_statistics_request( # pylint: disable=name-too _params["api-version"] = _SERIALIZER.query("api_version", api_version, "str") # Construct headers - _headers["Accept"] = _SERIALIZER.header("accept", accept, "str") + if accept is not None: + _headers["Accept"] = _SERIALIZER.header("accept", accept, "str") return HttpRequest(method="GET", url=_url, params=_params, headers=_headers, **kwargs) @@ -381,7 +393,7 @@ def build_search_index_analyze_text_request(name: str, **kwargs: Any) -> HttpReq _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None)) - api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2025-11-01-preview")) + api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2026-04-01")) accept = _headers.pop("Accept", "application/json;odata.metadata=minimal") # Construct URL @@ -396,7 +408,8 @@ def build_search_index_analyze_text_request(name: str, **kwargs: Any) -> HttpReq _params["api-version"] = _SERIALIZER.query("api_version", api_version, "str") # Construct headers - _headers["Accept"] = 
_SERIALIZER.header("accept", accept, "str") + if accept is not None: + _headers["Accept"] = _SERIALIZER.header("accept", accept, "str") if content_type is not None: _headers["Content-Type"] = _SERIALIZER.header("content_type", content_type, "str") @@ -411,7 +424,7 @@ def build_search_index_create_or_update_alias_request( # pylint: disable=name-t prefer: Literal["return=representation"] = kwargs.pop("prefer", _headers.pop("Prefer", "return=representation")) content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None)) - api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2025-11-01-preview")) + api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2026-04-01")) accept = _headers.pop("Accept", "application/json;odata.metadata=minimal") # Construct URL @@ -426,7 +439,8 @@ def build_search_index_create_or_update_alias_request( # pylint: disable=name-t _params["api-version"] = _SERIALIZER.query("api_version", api_version, "str") # Construct headers - _headers["Accept"] = _SERIALIZER.header("accept", accept, "str") + if accept is not None: + _headers["Accept"] = _SERIALIZER.header("accept", accept, "str") _headers["Prefer"] = _SERIALIZER.header("prefer", prefer, "str") if content_type is not None: _headers["Content-Type"] = _SERIALIZER.header("content_type", content_type, "str") @@ -446,7 +460,7 @@ def build_search_index_delete_alias_request( _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) - api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2025-11-01-preview")) + api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2026-04-01")) accept = _headers.pop("Accept", "application/json;odata.metadata=minimal") # Construct URL @@ -461,7 +475,8 @@ def build_search_index_delete_alias_request( _params["api-version"] = _SERIALIZER.query("api_version", api_version, "str") # Construct headers 
- _headers["Accept"] = _SERIALIZER.header("accept", accept, "str") + if accept is not None: + _headers["Accept"] = _SERIALIZER.header("accept", accept, "str") if_match = prep_if_match(etag, match_condition) if if_match is not None: _headers["If-Match"] = _SERIALIZER.header("if_match", if_match, "str") @@ -476,7 +491,7 @@ def build_search_index_get_alias_request(name: str, **kwargs: Any) -> HttpReques _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) - api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2025-11-01-preview")) + api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2026-04-01")) accept = _headers.pop("Accept", "application/json;odata.metadata=minimal") # Construct URL @@ -491,7 +506,8 @@ def build_search_index_get_alias_request(name: str, **kwargs: Any) -> HttpReques _params["api-version"] = _SERIALIZER.query("api_version", api_version, "str") # Construct headers - _headers["Accept"] = _SERIALIZER.header("accept", accept, "str") + if accept is not None: + _headers["Accept"] = _SERIALIZER.header("accept", accept, "str") return HttpRequest(method="GET", url=_url, params=_params, headers=_headers, **kwargs) @@ -500,7 +516,7 @@ def build_search_index_list_aliases_request(**kwargs: Any) -> HttpRequest: _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) - api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2025-11-01-preview")) + api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2026-04-01")) accept = _headers.pop("Accept", "application/json;odata.metadata=minimal") # Construct URL @@ -510,7 +526,8 @@ def build_search_index_list_aliases_request(**kwargs: Any) -> HttpRequest: _params["api-version"] = _SERIALIZER.query("api_version", api_version, "str") # Construct headers - _headers["Accept"] = 
_SERIALIZER.header("accept", accept, "str") + if accept is not None: + _headers["Accept"] = _SERIALIZER.header("accept", accept, "str") return HttpRequest(method="GET", url=_url, params=_params, headers=_headers, **kwargs) @@ -520,7 +537,7 @@ def build_search_index_create_alias_request(**kwargs: Any) -> HttpRequest: _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None)) - api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2025-11-01-preview")) + api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2026-04-01")) accept = _headers.pop("Accept", "application/json;odata.metadata=minimal") # Construct URL @@ -530,7 +547,8 @@ def build_search_index_create_alias_request(**kwargs: Any) -> HttpRequest: _params["api-version"] = _SERIALIZER.query("api_version", api_version, "str") # Construct headers - _headers["Accept"] = _SERIALIZER.header("accept", accept, "str") + if accept is not None: + _headers["Accept"] = _SERIALIZER.header("accept", accept, "str") if content_type is not None: _headers["Content-Type"] = _SERIALIZER.header("content_type", content_type, "str") @@ -545,7 +563,7 @@ def build_search_index_create_or_update_knowledge_base_request( # pylint: disab prefer: Literal["return=representation"] = kwargs.pop("prefer", _headers.pop("Prefer", "return=representation")) content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None)) - api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2025-11-01-preview")) + api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2026-04-01")) accept = _headers.pop("Accept", "application/json;odata.metadata=minimal") # Construct URL @@ -560,7 +578,8 @@ def build_search_index_create_or_update_knowledge_base_request( # pylint: disab _params["api-version"] = _SERIALIZER.query("api_version", api_version, "str") # Construct headers - 
_headers["Accept"] = _SERIALIZER.header("accept", accept, "str") + if accept is not None: + _headers["Accept"] = _SERIALIZER.header("accept", accept, "str") _headers["Prefer"] = _SERIALIZER.header("prefer", prefer, "str") if content_type is not None: _headers["Content-Type"] = _SERIALIZER.header("content_type", content_type, "str") @@ -580,7 +599,7 @@ def build_search_index_delete_knowledge_base_request( # pylint: disable=name-to _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) - api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2025-11-01-preview")) + api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2026-04-01")) accept = _headers.pop("Accept", "application/json;odata.metadata=minimal") # Construct URL @@ -595,7 +614,8 @@ def build_search_index_delete_knowledge_base_request( # pylint: disable=name-to _params["api-version"] = _SERIALIZER.query("api_version", api_version, "str") # Construct headers - _headers["Accept"] = _SERIALIZER.header("accept", accept, "str") + if accept is not None: + _headers["Accept"] = _SERIALIZER.header("accept", accept, "str") if_match = prep_if_match(etag, match_condition) if if_match is not None: _headers["If-Match"] = _SERIALIZER.header("if_match", if_match, "str") @@ -612,7 +632,7 @@ def build_search_index_get_knowledge_base_request( # pylint: disable=name-too-l _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) - api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2025-11-01-preview")) + api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2026-04-01")) accept = _headers.pop("Accept", "application/json;odata.metadata=minimal") # Construct URL @@ -627,7 +647,8 @@ def build_search_index_get_knowledge_base_request( # pylint: disable=name-too-l _params["api-version"] = 
_SERIALIZER.query("api_version", api_version, "str") # Construct headers - _headers["Accept"] = _SERIALIZER.header("accept", accept, "str") + if accept is not None: + _headers["Accept"] = _SERIALIZER.header("accept", accept, "str") return HttpRequest(method="GET", url=_url, params=_params, headers=_headers, **kwargs) @@ -636,7 +657,7 @@ def build_search_index_list_knowledge_bases_request(**kwargs: Any) -> HttpReques _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) - api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2025-11-01-preview")) + api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2026-04-01")) accept = _headers.pop("Accept", "application/json;odata.metadata=minimal") # Construct URL @@ -646,7 +667,8 @@ def build_search_index_list_knowledge_bases_request(**kwargs: Any) -> HttpReques _params["api-version"] = _SERIALIZER.query("api_version", api_version, "str") # Construct headers - _headers["Accept"] = _SERIALIZER.header("accept", accept, "str") + if accept is not None: + _headers["Accept"] = _SERIALIZER.header("accept", accept, "str") return HttpRequest(method="GET", url=_url, params=_params, headers=_headers, **kwargs) @@ -656,7 +678,7 @@ def build_search_index_create_knowledge_base_request(**kwargs: Any) -> HttpReque _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None)) - api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2025-11-01-preview")) + api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2026-04-01")) accept = _headers.pop("Accept", "application/json;odata.metadata=minimal") # Construct URL @@ -666,7 +688,8 @@ def build_search_index_create_knowledge_base_request(**kwargs: Any) -> HttpReque _params["api-version"] = _SERIALIZER.query("api_version", api_version, "str") # Construct 
headers - _headers["Accept"] = _SERIALIZER.header("accept", accept, "str") + if accept is not None: + _headers["Accept"] = _SERIALIZER.header("accept", accept, "str") if content_type is not None: _headers["Content-Type"] = _SERIALIZER.header("content_type", content_type, "str") @@ -681,7 +704,7 @@ def build_search_index_create_or_update_knowledge_source_request( # pylint: dis prefer: Literal["return=representation"] = kwargs.pop("prefer", _headers.pop("Prefer", "return=representation")) content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None)) - api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2025-11-01-preview")) + api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2026-04-01")) accept = _headers.pop("Accept", "application/json;odata.metadata=minimal") # Construct URL @@ -696,7 +719,8 @@ def build_search_index_create_or_update_knowledge_source_request( # pylint: dis _params["api-version"] = _SERIALIZER.query("api_version", api_version, "str") # Construct headers - _headers["Accept"] = _SERIALIZER.header("accept", accept, "str") + if accept is not None: + _headers["Accept"] = _SERIALIZER.header("accept", accept, "str") _headers["Prefer"] = _SERIALIZER.header("prefer", prefer, "str") if content_type is not None: _headers["Content-Type"] = _SERIALIZER.header("content_type", content_type, "str") @@ -716,7 +740,7 @@ def build_search_index_delete_knowledge_source_request( # pylint: disable=name- _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) - api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2025-11-01-preview")) + api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2026-04-01")) accept = _headers.pop("Accept", "application/json;odata.metadata=minimal") # Construct URL @@ -731,7 +755,8 @@ def build_search_index_delete_knowledge_source_request( # pylint: 
disable=name- _params["api-version"] = _SERIALIZER.query("api_version", api_version, "str") # Construct headers - _headers["Accept"] = _SERIALIZER.header("accept", accept, "str") + if accept is not None: + _headers["Accept"] = _SERIALIZER.header("accept", accept, "str") if_match = prep_if_match(etag, match_condition) if if_match is not None: _headers["If-Match"] = _SERIALIZER.header("if_match", if_match, "str") @@ -748,7 +773,7 @@ def build_search_index_get_knowledge_source_request( # pylint: disable=name-too _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) - api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2025-11-01-preview")) + api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2026-04-01")) accept = _headers.pop("Accept", "application/json;odata.metadata=minimal") # Construct URL @@ -763,7 +788,8 @@ def build_search_index_get_knowledge_source_request( # pylint: disable=name-too _params["api-version"] = _SERIALIZER.query("api_version", api_version, "str") # Construct headers - _headers["Accept"] = _SERIALIZER.header("accept", accept, "str") + if accept is not None: + _headers["Accept"] = _SERIALIZER.header("accept", accept, "str") return HttpRequest(method="GET", url=_url, params=_params, headers=_headers, **kwargs) @@ -772,7 +798,7 @@ def build_search_index_list_knowledge_sources_request(**kwargs: Any) -> HttpRequ _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) - api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2025-11-01-preview")) + api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2026-04-01")) accept = _headers.pop("Accept", "application/json;odata.metadata=minimal") # Construct URL @@ -782,7 +808,8 @@ def build_search_index_list_knowledge_sources_request(**kwargs: Any) -> HttpRequ _params["api-version"] = 
_SERIALIZER.query("api_version", api_version, "str") # Construct headers - _headers["Accept"] = _SERIALIZER.header("accept", accept, "str") + if accept is not None: + _headers["Accept"] = _SERIALIZER.header("accept", accept, "str") return HttpRequest(method="GET", url=_url, params=_params, headers=_headers, **kwargs) @@ -792,7 +819,7 @@ def build_search_index_create_knowledge_source_request(**kwargs: Any) -> HttpReq _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None)) - api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2025-11-01-preview")) + api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2026-04-01")) accept = _headers.pop("Accept", "application/json;odata.metadata=minimal") # Construct URL @@ -802,7 +829,8 @@ def build_search_index_create_knowledge_source_request(**kwargs: Any) -> HttpReq _params["api-version"] = _SERIALIZER.query("api_version", api_version, "str") # Construct headers - _headers["Accept"] = _SERIALIZER.header("accept", accept, "str") + if accept is not None: + _headers["Accept"] = _SERIALIZER.header("accept", accept, "str") if content_type is not None: _headers["Content-Type"] = _SERIALIZER.header("content_type", content_type, "str") @@ -815,7 +843,7 @@ def build_search_index_get_knowledge_source_status_request( # pylint: disable=n _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) - api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2025-11-01-preview")) + api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2026-04-01")) accept = _headers.pop("Accept", "application/json;odata.metadata=minimal") # Construct URL @@ -830,7 +858,8 @@ def build_search_index_get_knowledge_source_status_request( # pylint: disable=n _params["api-version"] = _SERIALIZER.query("api_version", 
api_version, "str") # Construct headers - _headers["Accept"] = _SERIALIZER.header("accept", accept, "str") + if accept is not None: + _headers["Accept"] = _SERIALIZER.header("accept", accept, "str") return HttpRequest(method="GET", url=_url, params=_params, headers=_headers, **kwargs) @@ -839,7 +868,7 @@ def build_search_index_get_service_statistics_request(**kwargs: Any) -> HttpRequ _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) - api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2025-11-01-preview")) + api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2026-04-01")) accept = _headers.pop("Accept", "application/json;odata.metadata=minimal") # Construct URL @@ -849,44 +878,21 @@ def build_search_index_get_service_statistics_request(**kwargs: Any) -> HttpRequ _params["api-version"] = _SERIALIZER.query("api_version", api_version, "str") # Construct headers - _headers["Accept"] = _SERIALIZER.header("accept", accept, "str") - - return HttpRequest(method="GET", url=_url, params=_params, headers=_headers, **kwargs) - - -def build_search_index_list_index_stats_summary_request(**kwargs: Any) -> HttpRequest: # pylint: disable=name-too-long - _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) - _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) - - api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2025-11-01-preview")) - accept = _headers.pop("Accept", "application/json;odata.metadata=minimal") - - # Construct URL - _url = "/indexstats" - - # Construct parameters - _params["api-version"] = _SERIALIZER.query("api_version", api_version, "str") - - # Construct headers - _headers["Accept"] = _SERIALIZER.header("accept", accept, "str") + if accept is not None: + _headers["Accept"] = _SERIALIZER.header("accept", accept, "str") return HttpRequest(method="GET", url=_url, params=_params, headers=_headers, 
**kwargs) def build_search_indexer_create_or_update_data_source_connection_request( # pylint: disable=name-too-long - name: str, - *, - skip_indexer_reset_requirement_for_cache: Optional[bool] = None, - etag: Optional[str] = None, - match_condition: Optional[MatchConditions] = None, - **kwargs: Any, + name: str, *, etag: Optional[str] = None, match_condition: Optional[MatchConditions] = None, **kwargs: Any ) -> HttpRequest: _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) prefer: Literal["return=representation"] = kwargs.pop("prefer", _headers.pop("Prefer", "return=representation")) content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None)) - api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2025-11-01-preview")) + api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2026-04-01")) accept = _headers.pop("Accept", "application/json;odata.metadata=minimal") # Construct URL @@ -899,13 +905,10 @@ def build_search_indexer_create_or_update_data_source_connection_request( # pyl # Construct parameters _params["api-version"] = _SERIALIZER.query("api_version", api_version, "str") - if skip_indexer_reset_requirement_for_cache is not None: - _params["ignoreResetRequirements"] = _SERIALIZER.query( - "skip_indexer_reset_requirement_for_cache", skip_indexer_reset_requirement_for_cache, "bool" - ) # Construct headers - _headers["Accept"] = _SERIALIZER.header("accept", accept, "str") + if accept is not None: + _headers["Accept"] = _SERIALIZER.header("accept", accept, "str") _headers["Prefer"] = _SERIALIZER.header("prefer", prefer, "str") if content_type is not None: _headers["Content-Type"] = _SERIALIZER.header("content_type", content_type, "str") @@ -925,7 +928,7 @@ def build_search_indexer_delete_data_source_connection_request( # pylint: disab _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) _params = 
case_insensitive_dict(kwargs.pop("params", {}) or {}) - api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2025-11-01-preview")) + api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2026-04-01")) accept = _headers.pop("Accept", "application/json;odata.metadata=minimal") # Construct URL @@ -940,7 +943,8 @@ def build_search_indexer_delete_data_source_connection_request( # pylint: disab _params["api-version"] = _SERIALIZER.query("api_version", api_version, "str") # Construct headers - _headers["Accept"] = _SERIALIZER.header("accept", accept, "str") + if accept is not None: + _headers["Accept"] = _SERIALIZER.header("accept", accept, "str") if_match = prep_if_match(etag, match_condition) if if_match is not None: _headers["If-Match"] = _SERIALIZER.header("if_match", if_match, "str") @@ -957,7 +961,7 @@ def build_search_indexer_get_data_source_connection_request( # pylint: disable= _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) - api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2025-11-01-preview")) + api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2026-04-01")) accept = _headers.pop("Accept", "application/json;odata.metadata=minimal") # Construct URL @@ -972,7 +976,8 @@ def build_search_indexer_get_data_source_connection_request( # pylint: disable= _params["api-version"] = _SERIALIZER.query("api_version", api_version, "str") # Construct headers - _headers["Accept"] = _SERIALIZER.header("accept", accept, "str") + if accept is not None: + _headers["Accept"] = _SERIALIZER.header("accept", accept, "str") return HttpRequest(method="GET", url=_url, params=_params, headers=_headers, **kwargs) @@ -983,7 +988,7 @@ def build_search_indexer_get_data_source_connections_request( # pylint: disable _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) _params = 
case_insensitive_dict(kwargs.pop("params", {}) or {}) - api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2025-11-01-preview")) + api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2026-04-01")) accept = _headers.pop("Accept", "application/json;odata.metadata=minimal") # Construct URL @@ -995,7 +1000,8 @@ def build_search_indexer_get_data_source_connections_request( # pylint: disable _params["$select"] = _SERIALIZER.query("select", select, "[str]", div=",") # Construct headers - _headers["Accept"] = _SERIALIZER.header("accept", accept, "str") + if accept is not None: + _headers["Accept"] = _SERIALIZER.header("accept", accept, "str") return HttpRequest(method="GET", url=_url, params=_params, headers=_headers, **kwargs) @@ -1007,7 +1013,7 @@ def build_search_indexer_create_data_source_connection_request( # pylint: disab _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None)) - api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2025-11-01-preview")) + api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2026-04-01")) accept = _headers.pop("Accept", "application/json;odata.metadata=minimal") # Construct URL @@ -1017,7 +1023,8 @@ def build_search_indexer_create_data_source_connection_request( # pylint: disab _params["api-version"] = _SERIALIZER.query("api_version", api_version, "str") # Construct headers - _headers["Accept"] = _SERIALIZER.header("accept", accept, "str") + if accept is not None: + _headers["Accept"] = _SERIALIZER.header("accept", accept, "str") if content_type is not None: _headers["Content-Type"] = _SERIALIZER.header("content_type", content_type, "str") @@ -1030,7 +1037,7 @@ def build_search_indexer_reset_indexer_request( # pylint: disable=name-too-long _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) _params = 
case_insensitive_dict(kwargs.pop("params", {}) or {}) - api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2025-11-01-preview")) + api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2026-04-01")) accept = _headers.pop("Accept", "application/json;odata.metadata=minimal") # Construct URL @@ -1045,65 +1052,8 @@ def build_search_indexer_reset_indexer_request( # pylint: disable=name-too-long _params["api-version"] = _SERIALIZER.query("api_version", api_version, "str") # Construct headers - _headers["Accept"] = _SERIALIZER.header("accept", accept, "str") - - return HttpRequest(method="POST", url=_url, params=_params, headers=_headers, **kwargs) - - -def build_search_indexer_resync_request(name: str, **kwargs: Any) -> HttpRequest: - _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) - _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) - - content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None)) - api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2025-11-01-preview")) - accept = _headers.pop("Accept", "application/json;odata.metadata=minimal") - - # Construct URL - _url = "/indexers('{indexerName}')/search.resync" - path_format_arguments = { - "indexerName": _SERIALIZER.url("name", name, "str"), - } - - _url: str = _url.format(**path_format_arguments) # type: ignore - - # Construct parameters - _params["api-version"] = _SERIALIZER.query("api_version", api_version, "str") - - # Construct headers - _headers["Accept"] = _SERIALIZER.header("accept", accept, "str") - if content_type is not None: - _headers["Content-Type"] = _SERIALIZER.header("content_type", content_type, "str") - - return HttpRequest(method="POST", url=_url, params=_params, headers=_headers, **kwargs) - - -def build_search_indexer_reset_documents_request( # pylint: disable=name-too-long - name: str, *, overwrite: Optional[bool] = None, **kwargs: Any -) -> HttpRequest: - _headers = 
case_insensitive_dict(kwargs.pop("headers", {}) or {}) - _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) - - content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None)) - api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2025-11-01-preview")) - accept = _headers.pop("Accept", "application/json;odata.metadata=minimal") - - # Construct URL - _url = "/indexers('{indexerName}')/search.resetdocs" - path_format_arguments = { - "indexerName": _SERIALIZER.url("name", name, "str"), - } - - _url: str = _url.format(**path_format_arguments) # type: ignore - - # Construct parameters - _params["api-version"] = _SERIALIZER.query("api_version", api_version, "str") - if overwrite is not None: - _params["overwrite"] = _SERIALIZER.query("overwrite", overwrite, "bool") - - # Construct headers - _headers["Accept"] = _SERIALIZER.header("accept", accept, "str") - if content_type is not None: - _headers["Content-Type"] = _SERIALIZER.header("content_type", content_type, "str") + if accept is not None: + _headers["Accept"] = _SERIALIZER.header("accept", accept, "str") return HttpRequest(method="POST", url=_url, params=_params, headers=_headers, **kwargs) @@ -1112,7 +1062,7 @@ def build_search_indexer_run_indexer_request(name: str, **kwargs: Any) -> HttpRe _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) - api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2025-11-01-preview")) + api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2026-04-01")) accept = _headers.pop("Accept", "application/json;odata.metadata=minimal") # Construct URL @@ -1127,26 +1077,21 @@ def build_search_indexer_run_indexer_request(name: str, **kwargs: Any) -> HttpRe _params["api-version"] = _SERIALIZER.query("api_version", api_version, "str") # Construct headers - _headers["Accept"] = _SERIALIZER.header("accept", accept, "str") 
+ if accept is not None: + _headers["Accept"] = _SERIALIZER.header("accept", accept, "str") return HttpRequest(method="POST", url=_url, params=_params, headers=_headers, **kwargs) def build_search_indexer_create_or_update_indexer_request( # pylint: disable=name-too-long - name: str, - *, - skip_indexer_reset_requirement_for_cache: Optional[bool] = None, - disable_cache_reprocessing_change_detection: Optional[bool] = None, - etag: Optional[str] = None, - match_condition: Optional[MatchConditions] = None, - **kwargs: Any, + name: str, *, etag: Optional[str] = None, match_condition: Optional[MatchConditions] = None, **kwargs: Any ) -> HttpRequest: _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) prefer: Literal["return=representation"] = kwargs.pop("prefer", _headers.pop("Prefer", "return=representation")) content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None)) - api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2025-11-01-preview")) + api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2026-04-01")) accept = _headers.pop("Accept", "application/json;odata.metadata=minimal") # Construct URL @@ -1159,17 +1104,10 @@ def build_search_indexer_create_or_update_indexer_request( # pylint: disable=na # Construct parameters _params["api-version"] = _SERIALIZER.query("api_version", api_version, "str") - if skip_indexer_reset_requirement_for_cache is not None: - _params["ignoreResetRequirements"] = _SERIALIZER.query( - "skip_indexer_reset_requirement_for_cache", skip_indexer_reset_requirement_for_cache, "bool" - ) - if disable_cache_reprocessing_change_detection is not None: - _params["disableCacheReprocessingChangeDetection"] = _SERIALIZER.query( - "disable_cache_reprocessing_change_detection", disable_cache_reprocessing_change_detection, "bool" - ) # Construct headers - _headers["Accept"] = _SERIALIZER.header("accept", 
accept, "str") + if accept is not None: + _headers["Accept"] = _SERIALIZER.header("accept", accept, "str") _headers["Prefer"] = _SERIALIZER.header("prefer", prefer, "str") if content_type is not None: _headers["Content-Type"] = _SERIALIZER.header("content_type", content_type, "str") @@ -1189,7 +1127,7 @@ def build_search_indexer_delete_indexer_request( # pylint: disable=name-too-lon _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) - api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2025-11-01-preview")) + api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2026-04-01")) accept = _headers.pop("Accept", "application/json;odata.metadata=minimal") # Construct URL @@ -1204,7 +1142,8 @@ def build_search_indexer_delete_indexer_request( # pylint: disable=name-too-lon _params["api-version"] = _SERIALIZER.query("api_version", api_version, "str") # Construct headers - _headers["Accept"] = _SERIALIZER.header("accept", accept, "str") + if accept is not None: + _headers["Accept"] = _SERIALIZER.header("accept", accept, "str") if_match = prep_if_match(etag, match_condition) if if_match is not None: _headers["If-Match"] = _SERIALIZER.header("if_match", if_match, "str") @@ -1219,7 +1158,7 @@ def build_search_indexer_get_indexer_request(name: str, **kwargs: Any) -> HttpRe _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) - api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2025-11-01-preview")) + api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2026-04-01")) accept = _headers.pop("Accept", "application/json;odata.metadata=minimal") # Construct URL @@ -1234,7 +1173,8 @@ def build_search_indexer_get_indexer_request(name: str, **kwargs: Any) -> HttpRe _params["api-version"] = _SERIALIZER.query("api_version", api_version, "str") # Construct 
headers - _headers["Accept"] = _SERIALIZER.header("accept", accept, "str") + if accept is not None: + _headers["Accept"] = _SERIALIZER.header("accept", accept, "str") return HttpRequest(method="GET", url=_url, params=_params, headers=_headers, **kwargs) @@ -1245,7 +1185,7 @@ def build_search_indexer_get_indexers_request( # pylint: disable=name-too-long _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) - api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2025-11-01-preview")) + api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2026-04-01")) accept = _headers.pop("Accept", "application/json;odata.metadata=minimal") # Construct URL @@ -1257,7 +1197,8 @@ def build_search_indexer_get_indexers_request( # pylint: disable=name-too-long _params["$select"] = _SERIALIZER.query("select", select, "[str]", div=",") # Construct headers - _headers["Accept"] = _SERIALIZER.header("accept", accept, "str") + if accept is not None: + _headers["Accept"] = _SERIALIZER.header("accept", accept, "str") return HttpRequest(method="GET", url=_url, params=_params, headers=_headers, **kwargs) @@ -1267,7 +1208,7 @@ def build_search_indexer_create_indexer_request(**kwargs: Any) -> HttpRequest: _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None)) - api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2025-11-01-preview")) + api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2026-04-01")) accept = _headers.pop("Accept", "application/json;odata.metadata=minimal") # Construct URL @@ -1277,7 +1218,8 @@ def build_search_indexer_create_indexer_request(**kwargs: Any) -> HttpRequest: _params["api-version"] = _SERIALIZER.query("api_version", api_version, "str") # Construct headers - _headers["Accept"] = _SERIALIZER.header("accept", accept, 
"str") + if accept is not None: + _headers["Accept"] = _SERIALIZER.header("accept", accept, "str") if content_type is not None: _headers["Content-Type"] = _SERIALIZER.header("content_type", content_type, "str") @@ -1290,7 +1232,7 @@ def build_search_indexer_get_indexer_status_request( # pylint: disable=name-too _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) - api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2025-11-01-preview")) + api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2026-04-01")) accept = _headers.pop("Accept", "application/json;odata.metadata=minimal") # Construct URL @@ -1305,26 +1247,21 @@ def build_search_indexer_get_indexer_status_request( # pylint: disable=name-too _params["api-version"] = _SERIALIZER.query("api_version", api_version, "str") # Construct headers - _headers["Accept"] = _SERIALIZER.header("accept", accept, "str") + if accept is not None: + _headers["Accept"] = _SERIALIZER.header("accept", accept, "str") return HttpRequest(method="GET", url=_url, params=_params, headers=_headers, **kwargs) def build_search_indexer_create_or_update_skillset_request( # pylint: disable=name-too-long - name: str, - *, - skip_indexer_reset_requirement_for_cache: Optional[bool] = None, - disable_cache_reprocessing_change_detection: Optional[bool] = None, - etag: Optional[str] = None, - match_condition: Optional[MatchConditions] = None, - **kwargs: Any, + name: str, *, etag: Optional[str] = None, match_condition: Optional[MatchConditions] = None, **kwargs: Any ) -> HttpRequest: _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) prefer: Literal["return=representation"] = kwargs.pop("prefer", _headers.pop("Prefer", "return=representation")) content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None)) - api_version: str = 
kwargs.pop("api_version", _params.pop("api-version", "2025-11-01-preview")) + api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2026-04-01")) accept = _headers.pop("Accept", "application/json;odata.metadata=minimal") # Construct URL @@ -1337,17 +1274,10 @@ def build_search_indexer_create_or_update_skillset_request( # pylint: disable=n # Construct parameters _params["api-version"] = _SERIALIZER.query("api_version", api_version, "str") - if skip_indexer_reset_requirement_for_cache is not None: - _params["ignoreResetRequirements"] = _SERIALIZER.query( - "skip_indexer_reset_requirement_for_cache", skip_indexer_reset_requirement_for_cache, "bool" - ) - if disable_cache_reprocessing_change_detection is not None: - _params["disableCacheReprocessingChangeDetection"] = _SERIALIZER.query( - "disable_cache_reprocessing_change_detection", disable_cache_reprocessing_change_detection, "bool" - ) # Construct headers - _headers["Accept"] = _SERIALIZER.header("accept", accept, "str") + if accept is not None: + _headers["Accept"] = _SERIALIZER.header("accept", accept, "str") _headers["Prefer"] = _SERIALIZER.header("prefer", prefer, "str") if content_type is not None: _headers["Content-Type"] = _SERIALIZER.header("content_type", content_type, "str") @@ -1367,7 +1297,7 @@ def build_search_indexer_delete_skillset_request( # pylint: disable=name-too-lo _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) - api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2025-11-01-preview")) + api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2026-04-01")) accept = _headers.pop("Accept", "application/json;odata.metadata=minimal") # Construct URL @@ -1382,7 +1312,8 @@ def build_search_indexer_delete_skillset_request( # pylint: disable=name-too-lo _params["api-version"] = _SERIALIZER.query("api_version", api_version, "str") # Construct headers - 
_headers["Accept"] = _SERIALIZER.header("accept", accept, "str") + if accept is not None: + _headers["Accept"] = _SERIALIZER.header("accept", accept, "str") if_match = prep_if_match(etag, match_condition) if if_match is not None: _headers["If-Match"] = _SERIALIZER.header("if_match", if_match, "str") @@ -1397,7 +1328,7 @@ def build_search_indexer_get_skillset_request(name: str, **kwargs: Any) -> HttpR _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) - api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2025-11-01-preview")) + api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2026-04-01")) accept = _headers.pop("Accept", "application/json;odata.metadata=minimal") # Construct URL @@ -1412,7 +1343,8 @@ def build_search_indexer_get_skillset_request(name: str, **kwargs: Any) -> HttpR _params["api-version"] = _SERIALIZER.query("api_version", api_version, "str") # Construct headers - _headers["Accept"] = _SERIALIZER.header("accept", accept, "str") + if accept is not None: + _headers["Accept"] = _SERIALIZER.header("accept", accept, "str") return HttpRequest(method="GET", url=_url, params=_params, headers=_headers, **kwargs) @@ -1423,7 +1355,7 @@ def build_search_indexer_get_skillsets_request( # pylint: disable=name-too-long _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) - api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2025-11-01-preview")) + api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2026-04-01")) accept = _headers.pop("Accept", "application/json;odata.metadata=minimal") # Construct URL @@ -1435,7 +1367,8 @@ def build_search_indexer_get_skillsets_request( # pylint: disable=name-too-long _params["$select"] = _SERIALIZER.query("select", select, "[str]", div=",") # Construct headers - _headers["Accept"] = 
_SERIALIZER.header("accept", accept, "str") + if accept is not None: + _headers["Accept"] = _SERIALIZER.header("accept", accept, "str") return HttpRequest(method="GET", url=_url, params=_params, headers=_headers, **kwargs) @@ -1445,7 +1378,7 @@ def build_search_indexer_create_skillset_request(**kwargs: Any) -> HttpRequest: _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None)) - api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2025-11-01-preview")) + api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2026-04-01")) accept = _headers.pop("Accept", "application/json;odata.metadata=minimal") # Construct URL @@ -1455,34 +1388,8 @@ def build_search_indexer_create_skillset_request(**kwargs: Any) -> HttpRequest: _params["api-version"] = _SERIALIZER.query("api_version", api_version, "str") # Construct headers - _headers["Accept"] = _SERIALIZER.header("accept", accept, "str") - if content_type is not None: - _headers["Content-Type"] = _SERIALIZER.header("content_type", content_type, "str") - - return HttpRequest(method="POST", url=_url, params=_params, headers=_headers, **kwargs) - - -def build_search_indexer_reset_skills_request(name: str, **kwargs: Any) -> HttpRequest: # pylint: disable=name-too-long - _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) - _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) - - content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None)) - api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2025-11-01-preview")) - accept = _headers.pop("Accept", "application/json;odata.metadata=minimal") - - # Construct URL - _url = "/skillsets('{skillsetName}')/search.resetskills" - path_format_arguments = { - "skillsetName": _SERIALIZER.url("name", name, "str"), - } - - _url: str = _url.format(**path_format_arguments) # type: ignore 
- - # Construct parameters - _params["api-version"] = _SERIALIZER.query("api_version", api_version, "str") - - # Construct headers - _headers["Accept"] = _SERIALIZER.header("accept", accept, "str") + if accept is not None: + _headers["Accept"] = _SERIALIZER.header("accept", accept, "str") if content_type is not None: _headers["Content-Type"] = _SERIALIZER.header("content_type", content_type, "str") @@ -1597,6 +1504,7 @@ def _create_or_update_synonym_map( } _request.url = self._client.format_url(_request.url, **path_format_arguments) + _decompress = kwargs.pop("decompress", True) _stream = kwargs.pop("stream", False) pipeline_response: PipelineResponse = self._client._pipeline.run( # pylint: disable=protected-access _request, stream=_stream, **kwargs @@ -1618,7 +1526,7 @@ def _create_or_update_synonym_map( raise HttpResponseError(response=response, model=error) if _stream: - deserialized = response.iter_bytes() + deserialized = response.iter_bytes() if _decompress else response.iter_raw() else: deserialized = _deserialize(_models1.SynonymMap, response.json()) @@ -1728,6 +1636,7 @@ def get_synonym_map(self, name: str, **kwargs: Any) -> _models1.SynonymMap: } _request.url = self._client.format_url(_request.url, **path_format_arguments) + _decompress = kwargs.pop("decompress", True) _stream = kwargs.pop("stream", False) pipeline_response: PipelineResponse = self._client._pipeline.run( # pylint: disable=protected-access _request, stream=_stream, **kwargs @@ -1749,7 +1658,7 @@ def get_synonym_map(self, name: str, **kwargs: Any) -> _models1.SynonymMap: raise HttpResponseError(response=response, model=error) if _stream: - deserialized = response.iter_bytes() + deserialized = response.iter_bytes() if _decompress else response.iter_raw() else: deserialized = _deserialize(_models1.SynonymMap, response.json()) @@ -1796,6 +1705,7 @@ def _get_synonym_maps( } _request.url = self._client.format_url(_request.url, **path_format_arguments) + _decompress = kwargs.pop("decompress", 
True) _stream = kwargs.pop("stream", False) pipeline_response: PipelineResponse = self._client._pipeline.run( # pylint: disable=protected-access _request, stream=_stream, **kwargs @@ -1817,7 +1727,7 @@ def _get_synonym_maps( raise HttpResponseError(response=response, model=error) if _stream: - deserialized = response.iter_bytes() + deserialized = response.iter_bytes() if _decompress else response.iter_raw() else: deserialized = _deserialize( _models1._models.ListSynonymMapsResult, response.json() # pylint: disable=protected-access @@ -1922,6 +1832,7 @@ def create_synonym_map( } _request.url = self._client.format_url(_request.url, **path_format_arguments) + _decompress = kwargs.pop("decompress", True) _stream = kwargs.pop("stream", False) pipeline_response: PipelineResponse = self._client._pipeline.run( # pylint: disable=protected-access _request, stream=_stream, **kwargs @@ -1943,7 +1854,7 @@ def create_synonym_map( raise HttpResponseError(response=response, model=error) if _stream: - deserialized = response.iter_bytes() + deserialized = response.iter_bytes() if _decompress else response.iter_raw() else: deserialized = _deserialize(_models1.SynonymMap, response.json()) @@ -2067,6 +1978,7 @@ def _create_or_update_index( } _request.url = self._client.format_url(_request.url, **path_format_arguments) + _decompress = kwargs.pop("decompress", True) _stream = kwargs.pop("stream", False) pipeline_response: PipelineResponse = self._client._pipeline.run( # pylint: disable=protected-access _request, stream=_stream, **kwargs @@ -2088,7 +2000,7 @@ def _create_or_update_index( raise HttpResponseError(response=response, model=error) if _stream: - deserialized = response.iter_bytes() + deserialized = response.iter_bytes() if _decompress else response.iter_raw() else: deserialized = _deserialize(_models1.SearchIndex, response.json()) @@ -2200,6 +2112,7 @@ def get_index(self, name: str, **kwargs: Any) -> _models1.SearchIndex: } _request.url = self._client.format_url(_request.url, 
**path_format_arguments) + _decompress = kwargs.pop("decompress", True) _stream = kwargs.pop("stream", False) pipeline_response: PipelineResponse = self._client._pipeline.run( # pylint: disable=protected-access _request, stream=_stream, **kwargs @@ -2221,7 +2134,7 @@ def get_index(self, name: str, **kwargs: Any) -> _models1.SearchIndex: raise HttpResponseError(response=response, model=error) if _stream: - deserialized = response.iter_bytes() + deserialized = response.iter_bytes() if _decompress else response.iter_raw() else: deserialized = _deserialize(_models1.SearchIndex, response.json()) @@ -2290,7 +2203,10 @@ def prepare_request(next_link=None): def extract_data(pipeline_response): deserialized = pipeline_response.http_response.json() - list_of_elem = _deserialize(list[_models1.SearchIndex], deserialized.get("value", [])) + list_of_elem = _deserialize( + list[_models1.SearchIndex], + deserialized.get("value", []), + ) if cls: list_of_elem = cls(list_of_elem) # type: ignore return None, iter(list_of_elem) @@ -2383,9 +2299,11 @@ def prepare_request(next_link=None): return _request def extract_data(pipeline_response): - # pylint: disable=protected-access deserialized = pipeline_response.http_response.json() - list_of_elem = _deserialize(list[_models1._models.SearchIndexResponse], deserialized.get("value", [])) + list_of_elem = _deserialize( + list[_models1._models.SearchIndexResponse], # pylint: disable=protected-access + deserialized.get("value", []), + ) if cls: list_of_elem = cls(list_of_elem) # type: ignore return None, iter(list_of_elem) @@ -2503,6 +2421,7 @@ def create_index(self, index: Union[_models1.SearchIndex, JSON, IO[bytes]], **kw } _request.url = self._client.format_url(_request.url, **path_format_arguments) + _decompress = kwargs.pop("decompress", True) _stream = kwargs.pop("stream", False) pipeline_response: PipelineResponse = self._client._pipeline.run( # pylint: disable=protected-access _request, stream=_stream, **kwargs @@ -2524,7 +2443,7 @@ def 
create_index(self, index: Union[_models1.SearchIndex, JSON, IO[bytes]], **kw raise HttpResponseError(response=response, model=error) if _stream: - deserialized = response.iter_bytes() + deserialized = response.iter_bytes() if _decompress else response.iter_raw() else: deserialized = _deserialize(_models1.SearchIndex, response.json()) @@ -2568,6 +2487,7 @@ def _get_index_statistics(self, name: str, **kwargs: Any) -> _models1.GetIndexSt } _request.url = self._client.format_url(_request.url, **path_format_arguments) + _decompress = kwargs.pop("decompress", True) _stream = kwargs.pop("stream", False) pipeline_response: PipelineResponse = self._client._pipeline.run( # pylint: disable=protected-access _request, stream=_stream, **kwargs @@ -2589,7 +2509,7 @@ def _get_index_statistics(self, name: str, **kwargs: Any) -> _models1.GetIndexSt raise HttpResponseError(response=response, model=error) if _stream: - deserialized = response.iter_bytes() + deserialized = response.iter_bytes() if _decompress else response.iter_raw() else: deserialized = _deserialize(_models1.GetIndexStatisticsResult, response.json()) @@ -2660,6 +2580,7 @@ def _analyze_text( } _request.url = self._client.format_url(_request.url, **path_format_arguments) + _decompress = kwargs.pop("decompress", True) _stream = kwargs.pop("stream", False) pipeline_response: PipelineResponse = self._client._pipeline.run( # pylint: disable=protected-access _request, stream=_stream, **kwargs @@ -2681,7 +2602,7 @@ def _analyze_text( raise HttpResponseError(response=response, model=error) if _stream: - deserialized = response.iter_bytes() + deserialized = response.iter_bytes() if _decompress else response.iter_raw() else: deserialized = _deserialize(_models1.AnalyzeResult, response.json()) @@ -2794,6 +2715,7 @@ def _create_or_update_alias( } _request.url = self._client.format_url(_request.url, **path_format_arguments) + _decompress = kwargs.pop("decompress", True) _stream = kwargs.pop("stream", False) pipeline_response: 
PipelineResponse = self._client._pipeline.run( # pylint: disable=protected-access _request, stream=_stream, **kwargs @@ -2815,7 +2737,7 @@ def _create_or_update_alias( raise HttpResponseError(response=response, model=error) if _stream: - deserialized = response.iter_bytes() + deserialized = response.iter_bytes() if _decompress else response.iter_raw() else: deserialized = _deserialize(_models1.SearchAlias, response.json()) @@ -2926,6 +2848,7 @@ def get_alias(self, name: str, **kwargs: Any) -> _models1.SearchAlias: } _request.url = self._client.format_url(_request.url, **path_format_arguments) + _decompress = kwargs.pop("decompress", True) _stream = kwargs.pop("stream", False) pipeline_response: PipelineResponse = self._client._pipeline.run( # pylint: disable=protected-access _request, stream=_stream, **kwargs @@ -2947,7 +2870,7 @@ def get_alias(self, name: str, **kwargs: Any) -> _models1.SearchAlias: raise HttpResponseError(response=response, model=error) if _stream: - deserialized = response.iter_bytes() + deserialized = response.iter_bytes() if _decompress else response.iter_raw() else: deserialized = _deserialize(_models1.SearchAlias, response.json()) @@ -3016,7 +2939,10 @@ def prepare_request(next_link=None): def extract_data(pipeline_response): deserialized = pipeline_response.http_response.json() - list_of_elem = _deserialize(list[_models1.SearchAlias], deserialized.get("value", [])) + list_of_elem = _deserialize( + list[_models1.SearchAlias], + deserialized.get("value", []), + ) if cls: list_of_elem = cls(list_of_elem) # type: ignore return None, iter(list_of_elem) @@ -3134,6 +3060,7 @@ def create_alias(self, alias: Union[_models1.SearchAlias, JSON, IO[bytes]], **kw } _request.url = self._client.format_url(_request.url, **path_format_arguments) + _decompress = kwargs.pop("decompress", True) _stream = kwargs.pop("stream", False) pipeline_response: PipelineResponse = self._client._pipeline.run( # pylint: disable=protected-access _request, stream=_stream, 
**kwargs @@ -3155,7 +3082,7 @@ def create_alias(self, alias: Union[_models1.SearchAlias, JSON, IO[bytes]], **kw raise HttpResponseError(response=response, model=error) if _stream: - deserialized = response.iter_bytes() + deserialized = response.iter_bytes() if _decompress else response.iter_raw() else: deserialized = _deserialize(_models1.SearchAlias, response.json()) @@ -3268,6 +3195,7 @@ def _create_or_update_knowledge_base( } _request.url = self._client.format_url(_request.url, **path_format_arguments) + _decompress = kwargs.pop("decompress", True) _stream = kwargs.pop("stream", False) pipeline_response: PipelineResponse = self._client._pipeline.run( # pylint: disable=protected-access _request, stream=_stream, **kwargs @@ -3289,7 +3217,7 @@ def _create_or_update_knowledge_base( raise HttpResponseError(response=response, model=error) if _stream: - deserialized = response.iter_bytes() + deserialized = response.iter_bytes() if _decompress else response.iter_raw() else: deserialized = _deserialize(_models1.KnowledgeBase, response.json()) @@ -3399,6 +3327,7 @@ def get_knowledge_base(self, name: str, **kwargs: Any) -> _models1.KnowledgeBase } _request.url = self._client.format_url(_request.url, **path_format_arguments) + _decompress = kwargs.pop("decompress", True) _stream = kwargs.pop("stream", False) pipeline_response: PipelineResponse = self._client._pipeline.run( # pylint: disable=protected-access _request, stream=_stream, **kwargs @@ -3420,7 +3349,7 @@ def get_knowledge_base(self, name: str, **kwargs: Any) -> _models1.KnowledgeBase raise HttpResponseError(response=response, model=error) if _stream: - deserialized = response.iter_bytes() + deserialized = response.iter_bytes() if _decompress else response.iter_raw() else: deserialized = _deserialize(_models1.KnowledgeBase, response.json()) @@ -3489,7 +3418,10 @@ def prepare_request(next_link=None): def extract_data(pipeline_response): deserialized = pipeline_response.http_response.json() - list_of_elem = 
_deserialize(list[_models1.KnowledgeBase], deserialized.get("value", [])) + list_of_elem = _deserialize( + list[_models1.KnowledgeBase], + deserialized.get("value", []), + ) if cls: list_of_elem = cls(list_of_elem) # type: ignore return None, iter(list_of_elem) @@ -3609,6 +3541,7 @@ def create_knowledge_base( } _request.url = self._client.format_url(_request.url, **path_format_arguments) + _decompress = kwargs.pop("decompress", True) _stream = kwargs.pop("stream", False) pipeline_response: PipelineResponse = self._client._pipeline.run( # pylint: disable=protected-access _request, stream=_stream, **kwargs @@ -3630,7 +3563,7 @@ def create_knowledge_base( raise HttpResponseError(response=response, model=error) if _stream: - deserialized = response.iter_bytes() + deserialized = response.iter_bytes() if _decompress else response.iter_raw() else: deserialized = _deserialize(_models1.KnowledgeBase, response.json()) @@ -3744,6 +3677,7 @@ def _create_or_update_knowledge_source( } _request.url = self._client.format_url(_request.url, **path_format_arguments) + _decompress = kwargs.pop("decompress", True) _stream = kwargs.pop("stream", False) pipeline_response: PipelineResponse = self._client._pipeline.run( # pylint: disable=protected-access _request, stream=_stream, **kwargs @@ -3765,7 +3699,7 @@ def _create_or_update_knowledge_source( raise HttpResponseError(response=response, model=error) if _stream: - deserialized = response.iter_bytes() + deserialized = response.iter_bytes() if _decompress else response.iter_raw() else: deserialized = _deserialize(_models1.KnowledgeSource, response.json()) @@ -3875,6 +3809,7 @@ def get_knowledge_source(self, name: str, **kwargs: Any) -> _models1.KnowledgeSo } _request.url = self._client.format_url(_request.url, **path_format_arguments) + _decompress = kwargs.pop("decompress", True) _stream = kwargs.pop("stream", False) pipeline_response: PipelineResponse = self._client._pipeline.run( # pylint: disable=protected-access _request, 
stream=_stream, **kwargs @@ -3896,7 +3831,7 @@ def get_knowledge_source(self, name: str, **kwargs: Any) -> _models1.KnowledgeSo raise HttpResponseError(response=response, model=error) if _stream: - deserialized = response.iter_bytes() + deserialized = response.iter_bytes() if _decompress else response.iter_raw() else: deserialized = _deserialize(_models1.KnowledgeSource, response.json()) @@ -3965,7 +3900,10 @@ def prepare_request(next_link=None): def extract_data(pipeline_response): deserialized = pipeline_response.http_response.json() - list_of_elem = _deserialize(list[_models1.KnowledgeSource], deserialized.get("value", [])) + list_of_elem = _deserialize( + list[_models1.KnowledgeSource], + deserialized.get("value", []), + ) if cls: list_of_elem = cls(list_of_elem) # type: ignore return None, iter(list_of_elem) @@ -4086,6 +4024,7 @@ def create_knowledge_source( } _request.url = self._client.format_url(_request.url, **path_format_arguments) + _decompress = kwargs.pop("decompress", True) _stream = kwargs.pop("stream", False) pipeline_response: PipelineResponse = self._client._pipeline.run( # pylint: disable=protected-access _request, stream=_stream, **kwargs @@ -4107,7 +4046,7 @@ def create_knowledge_source( raise HttpResponseError(response=response, model=error) if _stream: - deserialized = response.iter_bytes() + deserialized = response.iter_bytes() if _decompress else response.iter_raw() else: deserialized = _deserialize(_models1.KnowledgeSource, response.json()) @@ -4150,6 +4089,7 @@ def get_knowledge_source_status(self, name: str, **kwargs: Any) -> _knowledgebas } _request.url = self._client.format_url(_request.url, **path_format_arguments) + _decompress = kwargs.pop("decompress", True) _stream = kwargs.pop("stream", False) pipeline_response: PipelineResponse = self._client._pipeline.run( # pylint: disable=protected-access _request, stream=_stream, **kwargs @@ -4171,7 +4111,7 @@ def get_knowledge_source_status(self, name: str, **kwargs: Any) -> _knowledgebas 
raise HttpResponseError(response=response, model=error) if _stream: - deserialized = response.iter_bytes() + deserialized = response.iter_bytes() if _decompress else response.iter_raw() else: deserialized = _deserialize(_knowledgebases_models3.KnowledgeSourceStatus, response.json()) @@ -4211,6 +4151,7 @@ def get_service_statistics(self, **kwargs: Any) -> _models1.SearchServiceStatist } _request.url = self._client.format_url(_request.url, **path_format_arguments) + _decompress = kwargs.pop("decompress", True) _stream = kwargs.pop("stream", False) pipeline_response: PipelineResponse = self._client._pipeline.run( # pylint: disable=protected-access _request, stream=_stream, **kwargs @@ -4232,7 +4173,7 @@ def get_service_statistics(self, **kwargs: Any) -> _models1.SearchServiceStatist raise HttpResponseError(response=response, model=error) if _stream: - deserialized = response.iter_bytes() + deserialized = response.iter_bytes() if _decompress else response.iter_raw() else: deserialized = _deserialize(_models1.SearchServiceStatistics, response.json()) @@ -4241,95 +4182,8 @@ def get_service_statistics(self, **kwargs: Any) -> _models1.SearchServiceStatist return deserialized # type: ignore - @distributed_trace - def list_index_stats_summary(self, **kwargs: Any) -> ItemPaged["_models1.IndexStatisticsSummary"]: - """Retrieves a summary of statistics for all indexes in the search service. 
- - :return: An iterator like instance of IndexStatisticsSummary - :rtype: - ~azure.core.paging.ItemPaged[~azure.search.documents.indexes.models.IndexStatisticsSummary] - :raises ~azure.core.exceptions.HttpResponseError: - """ - _headers = kwargs.pop("headers", {}) or {} - _params = kwargs.pop("params", {}) or {} - - cls: ClsType[list[_models1.IndexStatisticsSummary]] = kwargs.pop("cls", None) - error_map: MutableMapping = { - 401: ClientAuthenticationError, - 404: ResourceNotFoundError, - 409: ResourceExistsError, - 304: ResourceNotModifiedError, - } - error_map.update(kwargs.pop("error_map", {}) or {}) - - def prepare_request(next_link=None): - if not next_link: - - _request = build_search_index_list_index_stats_summary_request( - api_version=self._config.api_version, - headers=_headers, - params=_params, - ) - path_format_arguments = { - "endpoint": self._serialize.url( - "self._config.endpoint", self._config.endpoint, "str", skip_quote=True - ), - } - _request.url = self._client.format_url(_request.url, **path_format_arguments) - - else: - # make call to next link with the client's api-version - _parsed_next_link = urllib.parse.urlparse(next_link) - _next_request_params = case_insensitive_dict( - { - key: [urllib.parse.quote(v) for v in value] - for key, value in urllib.parse.parse_qs(_parsed_next_link.query).items() - } - ) - _next_request_params["api-version"] = self._config.api_version - _request = HttpRequest( - "GET", urllib.parse.urljoin(next_link, _parsed_next_link.path), params=_next_request_params - ) - path_format_arguments = { - "endpoint": self._serialize.url( - "self._config.endpoint", self._config.endpoint, "str", skip_quote=True - ), - } - _request.url = self._client.format_url(_request.url, **path_format_arguments) - - return _request - - def extract_data(pipeline_response): - deserialized = pipeline_response.http_response.json() - list_of_elem = _deserialize(list[_models1.IndexStatisticsSummary], deserialized.get("value", [])) - if cls: - 
list_of_elem = cls(list_of_elem) # type: ignore - return None, iter(list_of_elem) - - def get_next(next_link=None): - _request = prepare_request(next_link) - - _stream = False - pipeline_response: PipelineResponse = self._client._pipeline.run( # pylint: disable=protected-access - _request, stream=_stream, **kwargs - ) - response = pipeline_response.http_response - - if response.status_code not in [200]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - error = _failsafe_deserialize( - _models2.ErrorResponse, - response, - ) - raise HttpResponseError(response=response, model=error) - - return pipeline_response - - return ItemPaged(get_next, extract_data) - - -class _SearchIndexerClientOperationsMixin( # pylint: disable=too-many-public-methods +class _SearchIndexerClientOperationsMixin( ClientMixinABC[PipelineClient[HttpRequest, HttpResponse], SearchIndexerClientConfiguration] ): @@ -4339,7 +4193,6 @@ def _create_or_update_data_source_connection( name: str, data_source: _models1.SearchIndexerDataSourceConnection, *, - skip_indexer_reset_requirement_for_cache: Optional[bool] = None, content_type: str = "application/json", etag: Optional[str] = None, match_condition: Optional[MatchConditions] = None, @@ -4351,7 +4204,6 @@ def _create_or_update_data_source_connection( name: str, data_source: JSON, *, - skip_indexer_reset_requirement_for_cache: Optional[bool] = None, content_type: str = "application/json", etag: Optional[str] = None, match_condition: Optional[MatchConditions] = None, @@ -4363,7 +4215,6 @@ def _create_or_update_data_source_connection( name: str, data_source: IO[bytes], *, - skip_indexer_reset_requirement_for_cache: Optional[bool] = None, content_type: str = "application/json", etag: Optional[str] = None, match_condition: Optional[MatchConditions] = None, @@ -4376,7 +4227,6 @@ def _create_or_update_data_source_connection( name: str, data_source: Union[_models1.SearchIndexerDataSourceConnection, JSON, IO[bytes]], *, - 
skip_indexer_reset_requirement_for_cache: Optional[bool] = None, etag: Optional[str] = None, match_condition: Optional[MatchConditions] = None, **kwargs: Any, @@ -4389,9 +4239,6 @@ def _create_or_update_data_source_connection( following types: SearchIndexerDataSourceConnection, JSON, IO[bytes] Required. :type data_source: ~azure.search.documents.indexes.models.SearchIndexerDataSourceConnection or JSON or IO[bytes] - :keyword skip_indexer_reset_requirement_for_cache: Ignores cache reset requirements. Default - value is None. - :paramtype skip_indexer_reset_requirement_for_cache: bool :keyword etag: check if resource is changed. Set None to skip checking etag. Default value is None. :paramtype etag: str @@ -4432,7 +4279,6 @@ def _create_or_update_data_source_connection( _request = build_search_indexer_create_or_update_data_source_connection_request( name=name, - skip_indexer_reset_requirement_for_cache=skip_indexer_reset_requirement_for_cache, etag=etag, match_condition=match_condition, prefer=prefer, @@ -4447,6 +4293,7 @@ def _create_or_update_data_source_connection( } _request.url = self._client.format_url(_request.url, **path_format_arguments) + _decompress = kwargs.pop("decompress", True) _stream = kwargs.pop("stream", False) pipeline_response: PipelineResponse = self._client._pipeline.run( # pylint: disable=protected-access _request, stream=_stream, **kwargs @@ -4468,7 +4315,7 @@ def _create_or_update_data_source_connection( raise HttpResponseError(response=response, model=error) if _stream: - deserialized = response.iter_bytes() + deserialized = response.iter_bytes() if _decompress else response.iter_raw() else: deserialized = _deserialize(_models1.SearchIndexerDataSourceConnection, response.json()) @@ -4579,6 +4426,7 @@ def get_data_source_connection(self, name: str, **kwargs: Any) -> _models1.Searc } _request.url = self._client.format_url(_request.url, **path_format_arguments) + _decompress = kwargs.pop("decompress", True) _stream = kwargs.pop("stream", 
False) pipeline_response: PipelineResponse = self._client._pipeline.run( # pylint: disable=protected-access _request, stream=_stream, **kwargs @@ -4600,7 +4448,7 @@ def get_data_source_connection(self, name: str, **kwargs: Any) -> _models1.Searc raise HttpResponseError(response=response, model=error) if _stream: - deserialized = response.iter_bytes() + deserialized = response.iter_bytes() if _decompress else response.iter_raw() else: deserialized = _deserialize(_models1.SearchIndexerDataSourceConnection, response.json()) @@ -4647,6 +4495,7 @@ def _get_data_source_connections( } _request.url = self._client.format_url(_request.url, **path_format_arguments) + _decompress = kwargs.pop("decompress", True) _stream = kwargs.pop("stream", False) pipeline_response: PipelineResponse = self._client._pipeline.run( # pylint: disable=protected-access _request, stream=_stream, **kwargs @@ -4668,7 +4517,7 @@ def _get_data_source_connections( raise HttpResponseError(response=response, model=error) if _stream: - deserialized = response.iter_bytes() + deserialized = response.iter_bytes() if _decompress else response.iter_raw() else: deserialized = _deserialize( _models1._models.ListDataSourcesResult, response.json() # pylint: disable=protected-access @@ -4783,6 +4632,7 @@ def create_data_source_connection( } _request.url = self._client.format_url(_request.url, **path_format_arguments) + _decompress = kwargs.pop("decompress", True) _stream = kwargs.pop("stream", False) pipeline_response: PipelineResponse = self._client._pipeline.run( # pylint: disable=protected-access _request, stream=_stream, **kwargs @@ -4804,7 +4654,7 @@ def create_data_source_connection( raise HttpResponseError(response=response, model=error) if _stream: - deserialized = response.iter_bytes() + deserialized = response.iter_bytes() if _decompress else response.iter_raw() else: deserialized = _deserialize(_models1.SearchIndexerDataSourceConnection, response.json()) @@ -4865,206 +4715,6 @@ def reset_indexer(self, 
name: str, **kwargs: Any) -> None: # pylint: disable=in if cls: return cls(pipeline_response, None, {}) # type: ignore - @overload - def _resync( - self, - name: str, - indexer_resync: _models1.IndexerResyncBody, - *, - content_type: str = "application/json", - **kwargs: Any, - ) -> None: ... - @overload - def _resync( - self, name: str, indexer_resync: JSON, *, content_type: str = "application/json", **kwargs: Any - ) -> None: ... - @overload - def _resync( - self, name: str, indexer_resync: IO[bytes], *, content_type: str = "application/json", **kwargs: Any - ) -> None: ... - - @distributed_trace - def _resync( # pylint: disable=inconsistent-return-statements - self, name: str, indexer_resync: Union[_models1.IndexerResyncBody, JSON, IO[bytes]], **kwargs: Any - ) -> None: - """Resync selective options from the datasource to be re-ingested by the indexer.". - - :param name: The name of the indexer. Required. - :type name: str - :param indexer_resync: The definition of the indexer resync options. Is one of the following - types: IndexerResyncBody, JSON, IO[bytes] Required. 
- :type indexer_resync: ~azure.search.documents.indexes.models.IndexerResyncBody or JSON or - IO[bytes] - :return: None - :rtype: None - :raises ~azure.core.exceptions.HttpResponseError: - """ - error_map: MutableMapping = { - 401: ClientAuthenticationError, - 404: ResourceNotFoundError, - 409: ResourceExistsError, - 304: ResourceNotModifiedError, - } - error_map.update(kwargs.pop("error_map", {}) or {}) - - _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) - _params = kwargs.pop("params", {}) or {} - - content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None)) - cls: ClsType[None] = kwargs.pop("cls", None) - - content_type = content_type or "application/json" - _content = None - if isinstance(indexer_resync, (IOBase, bytes)): - _content = indexer_resync - else: - _content = json.dumps(indexer_resync, cls=SdkJSONEncoder, exclude_readonly=True) # type: ignore - - _request = build_search_indexer_resync_request( - name=name, - content_type=content_type, - api_version=self._config.api_version, - content=_content, - headers=_headers, - params=_params, - ) - path_format_arguments = { - "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str", skip_quote=True), - } - _request.url = self._client.format_url(_request.url, **path_format_arguments) - - _stream = False - pipeline_response: PipelineResponse = self._client._pipeline.run( # pylint: disable=protected-access - _request, stream=_stream, **kwargs - ) - - response = pipeline_response.http_response - - if response.status_code not in [204]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - error = _failsafe_deserialize( - _models2.ErrorResponse, - response, - ) - raise HttpResponseError(response=response, model=error) - - if cls: - return cls(pipeline_response, None, {}) # type: ignore - - @overload - def _reset_documents( - self, - name: str, - keys_or_ids: Optional[_models1.DocumentKeysOrIds] = None, - 
*, - overwrite: Optional[bool] = None, - content_type: str = "application/json", - **kwargs: Any, - ) -> None: ... - @overload - def _reset_documents( - self, - name: str, - keys_or_ids: Optional[JSON] = None, - *, - overwrite: Optional[bool] = None, - content_type: str = "application/json", - **kwargs: Any, - ) -> None: ... - @overload - def _reset_documents( - self, - name: str, - keys_or_ids: Optional[IO[bytes]] = None, - *, - overwrite: Optional[bool] = None, - content_type: str = "application/json", - **kwargs: Any, - ) -> None: ... - - @distributed_trace - def _reset_documents( # pylint: disable=inconsistent-return-statements - self, - name: str, - keys_or_ids: Optional[Union[_models1.DocumentKeysOrIds, JSON, IO[bytes]]] = None, - *, - overwrite: Optional[bool] = None, - **kwargs: Any, - ) -> None: - """Resets specific documents in the datasource to be selectively re-ingested by the indexer. - - :param name: The name of the indexer. Required. - :type name: str - :param keys_or_ids: The keys or ids of the documents to be re-ingested. If keys are provided, - the document key field must be specified in the indexer configuration. If ids are provided, the - document key field is ignored. Is one of the following types: DocumentKeysOrIds, JSON, - IO[bytes] Default value is None. - :type keys_or_ids: ~azure.search.documents.indexes.models.DocumentKeysOrIds or JSON or - IO[bytes] - :keyword overwrite: If false, keys or ids will be appended to existing ones. If true, only the - keys or ids in this payload will be queued to be re-ingested. Default value is None. 
- :paramtype overwrite: bool - :return: None - :rtype: None - :raises ~azure.core.exceptions.HttpResponseError: - """ - error_map: MutableMapping = { - 401: ClientAuthenticationError, - 404: ResourceNotFoundError, - 409: ResourceExistsError, - 304: ResourceNotModifiedError, - } - error_map.update(kwargs.pop("error_map", {}) or {}) - - _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) - _params = kwargs.pop("params", {}) or {} - - content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None)) - content_type = content_type if keys_or_ids else None - cls: ClsType[None] = kwargs.pop("cls", None) - - content_type = content_type or "application/json" if keys_or_ids else None - _content = None - if isinstance(keys_or_ids, (IOBase, bytes)): - _content = keys_or_ids - else: - if keys_or_ids is not None: - _content = json.dumps(keys_or_ids, cls=SdkJSONEncoder, exclude_readonly=True) # type: ignore - else: - _content = None - - _request = build_search_indexer_reset_documents_request( - name=name, - overwrite=overwrite, - content_type=content_type, - api_version=self._config.api_version, - content=_content, - headers=_headers, - params=_params, - ) - path_format_arguments = { - "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str", skip_quote=True), - } - _request.url = self._client.format_url(_request.url, **path_format_arguments) - - _stream = False - pipeline_response: PipelineResponse = self._client._pipeline.run( # pylint: disable=protected-access - _request, stream=_stream, **kwargs - ) - - response = pipeline_response.http_response - - if response.status_code not in [204]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - error = _failsafe_deserialize( - _models2.ErrorResponse, - response, - ) - raise HttpResponseError(response=response, model=error) - - if cls: - return cls(pipeline_response, None, {}) # type: ignore - @distributed_trace def 
run_indexer(self, name: str, **kwargs: Any) -> None: # pylint: disable=inconsistent-return-statements """Runs an indexer on-demand. @@ -5123,8 +4773,6 @@ def _create_or_update_indexer( name: str, indexer: _models1.SearchIndexer, *, - skip_indexer_reset_requirement_for_cache: Optional[bool] = None, - disable_cache_reprocessing_change_detection: Optional[bool] = None, content_type: str = "application/json", etag: Optional[str] = None, match_condition: Optional[MatchConditions] = None, @@ -5136,8 +4784,6 @@ def _create_or_update_indexer( name: str, indexer: JSON, *, - skip_indexer_reset_requirement_for_cache: Optional[bool] = None, - disable_cache_reprocessing_change_detection: Optional[bool] = None, content_type: str = "application/json", etag: Optional[str] = None, match_condition: Optional[MatchConditions] = None, @@ -5149,8 +4795,6 @@ def _create_or_update_indexer( name: str, indexer: IO[bytes], *, - skip_indexer_reset_requirement_for_cache: Optional[bool] = None, - disable_cache_reprocessing_change_detection: Optional[bool] = None, content_type: str = "application/json", etag: Optional[str] = None, match_condition: Optional[MatchConditions] = None, @@ -5163,8 +4807,6 @@ def _create_or_update_indexer( name: str, indexer: Union[_models1.SearchIndexer, JSON, IO[bytes]], *, - skip_indexer_reset_requirement_for_cache: Optional[bool] = None, - disable_cache_reprocessing_change_detection: Optional[bool] = None, etag: Optional[str] = None, match_condition: Optional[MatchConditions] = None, **kwargs: Any, @@ -5176,12 +4818,6 @@ def _create_or_update_indexer( :param indexer: The definition of the indexer to create or update. Is one of the following types: SearchIndexer, JSON, IO[bytes] Required. :type indexer: ~azure.search.documents.indexes.models.SearchIndexer or JSON or IO[bytes] - :keyword skip_indexer_reset_requirement_for_cache: Ignores cache reset requirements. Default - value is None. 
- :paramtype skip_indexer_reset_requirement_for_cache: bool - :keyword disable_cache_reprocessing_change_detection: Disables cache reprocessing change - detection. Default value is None. - :paramtype disable_cache_reprocessing_change_detection: bool :keyword etag: check if resource is changed. Set None to skip checking etag. Default value is None. :paramtype etag: str @@ -5221,8 +4857,6 @@ def _create_or_update_indexer( _request = build_search_indexer_create_or_update_indexer_request( name=name, - skip_indexer_reset_requirement_for_cache=skip_indexer_reset_requirement_for_cache, - disable_cache_reprocessing_change_detection=disable_cache_reprocessing_change_detection, etag=etag, match_condition=match_condition, prefer=prefer, @@ -5237,6 +4871,7 @@ def _create_or_update_indexer( } _request.url = self._client.format_url(_request.url, **path_format_arguments) + _decompress = kwargs.pop("decompress", True) _stream = kwargs.pop("stream", False) pipeline_response: PipelineResponse = self._client._pipeline.run( # pylint: disable=protected-access _request, stream=_stream, **kwargs @@ -5258,7 +4893,7 @@ def _create_or_update_indexer( raise HttpResponseError(response=response, model=error) if _stream: - deserialized = response.iter_bytes() + deserialized = response.iter_bytes() if _decompress else response.iter_raw() else: deserialized = _deserialize(_models1.SearchIndexer, response.json()) @@ -5368,6 +5003,7 @@ def get_indexer(self, name: str, **kwargs: Any) -> _models1.SearchIndexer: } _request.url = self._client.format_url(_request.url, **path_format_arguments) + _decompress = kwargs.pop("decompress", True) _stream = kwargs.pop("stream", False) pipeline_response: PipelineResponse = self._client._pipeline.run( # pylint: disable=protected-access _request, stream=_stream, **kwargs @@ -5389,7 +5025,7 @@ def get_indexer(self, name: str, **kwargs: Any) -> _models1.SearchIndexer: raise HttpResponseError(response=response, model=error) if _stream: - deserialized = 
response.iter_bytes() + deserialized = response.iter_bytes() if _decompress else response.iter_raw() else: deserialized = _deserialize(_models1.SearchIndexer, response.json()) @@ -5436,6 +5072,7 @@ def _get_indexers( } _request.url = self._client.format_url(_request.url, **path_format_arguments) + _decompress = kwargs.pop("decompress", True) _stream = kwargs.pop("stream", False) pipeline_response: PipelineResponse = self._client._pipeline.run( # pylint: disable=protected-access _request, stream=_stream, **kwargs @@ -5457,7 +5094,7 @@ def _get_indexers( raise HttpResponseError(response=response, model=error) if _stream: - deserialized = response.iter_bytes() + deserialized = response.iter_bytes() if _decompress else response.iter_raw() else: deserialized = _deserialize( _models1._models.ListIndexersResult, response.json() # pylint: disable=protected-access @@ -5562,6 +5199,7 @@ def create_indexer( } _request.url = self._client.format_url(_request.url, **path_format_arguments) + _decompress = kwargs.pop("decompress", True) _stream = kwargs.pop("stream", False) pipeline_response: PipelineResponse = self._client._pipeline.run( # pylint: disable=protected-access _request, stream=_stream, **kwargs @@ -5583,7 +5221,7 @@ def create_indexer( raise HttpResponseError(response=response, model=error) if _stream: - deserialized = response.iter_bytes() + deserialized = response.iter_bytes() if _decompress else response.iter_raw() else: deserialized = _deserialize(_models1.SearchIndexer, response.json()) @@ -5626,6 +5264,7 @@ def get_indexer_status(self, name: str, **kwargs: Any) -> _models1.SearchIndexer } _request.url = self._client.format_url(_request.url, **path_format_arguments) + _decompress = kwargs.pop("decompress", True) _stream = kwargs.pop("stream", False) pipeline_response: PipelineResponse = self._client._pipeline.run( # pylint: disable=protected-access _request, stream=_stream, **kwargs @@ -5647,7 +5286,7 @@ def get_indexer_status(self, name: str, **kwargs: Any) -> 
_models1.SearchIndexer raise HttpResponseError(response=response, model=error) if _stream: - deserialized = response.iter_bytes() + deserialized = response.iter_bytes() if _decompress else response.iter_raw() else: deserialized = _deserialize(_models1.SearchIndexerStatus, response.json()) @@ -5662,8 +5301,6 @@ def _create_or_update_skillset( name: str, skillset: _models1.SearchIndexerSkillset, *, - skip_indexer_reset_requirement_for_cache: Optional[bool] = None, - disable_cache_reprocessing_change_detection: Optional[bool] = None, content_type: str = "application/json", etag: Optional[str] = None, match_condition: Optional[MatchConditions] = None, @@ -5675,8 +5312,6 @@ def _create_or_update_skillset( name: str, skillset: JSON, *, - skip_indexer_reset_requirement_for_cache: Optional[bool] = None, - disable_cache_reprocessing_change_detection: Optional[bool] = None, content_type: str = "application/json", etag: Optional[str] = None, match_condition: Optional[MatchConditions] = None, @@ -5688,8 +5323,6 @@ def _create_or_update_skillset( name: str, skillset: IO[bytes], *, - skip_indexer_reset_requirement_for_cache: Optional[bool] = None, - disable_cache_reprocessing_change_detection: Optional[bool] = None, content_type: str = "application/json", etag: Optional[str] = None, match_condition: Optional[MatchConditions] = None, @@ -5702,8 +5335,6 @@ def _create_or_update_skillset( name: str, skillset: Union[_models1.SearchIndexerSkillset, JSON, IO[bytes]], *, - skip_indexer_reset_requirement_for_cache: Optional[bool] = None, - disable_cache_reprocessing_change_detection: Optional[bool] = None, etag: Optional[str] = None, match_condition: Optional[MatchConditions] = None, **kwargs: Any, @@ -5716,12 +5347,6 @@ def _create_or_update_skillset( service. Is one of the following types: SearchIndexerSkillset, JSON, IO[bytes] Required. 
:type skillset: ~azure.search.documents.indexes.models.SearchIndexerSkillset or JSON or IO[bytes] - :keyword skip_indexer_reset_requirement_for_cache: Ignores cache reset requirements. Default - value is None. - :paramtype skip_indexer_reset_requirement_for_cache: bool - :keyword disable_cache_reprocessing_change_detection: Disables cache reprocessing change - detection. Default value is None. - :paramtype disable_cache_reprocessing_change_detection: bool :keyword etag: check if resource is changed. Set None to skip checking etag. Default value is None. :paramtype etag: str @@ -5761,8 +5386,6 @@ def _create_or_update_skillset( _request = build_search_indexer_create_or_update_skillset_request( name=name, - skip_indexer_reset_requirement_for_cache=skip_indexer_reset_requirement_for_cache, - disable_cache_reprocessing_change_detection=disable_cache_reprocessing_change_detection, etag=etag, match_condition=match_condition, prefer=prefer, @@ -5777,6 +5400,7 @@ def _create_or_update_skillset( } _request.url = self._client.format_url(_request.url, **path_format_arguments) + _decompress = kwargs.pop("decompress", True) _stream = kwargs.pop("stream", False) pipeline_response: PipelineResponse = self._client._pipeline.run( # pylint: disable=protected-access _request, stream=_stream, **kwargs @@ -5798,7 +5422,7 @@ def _create_or_update_skillset( raise HttpResponseError(response=response, model=error) if _stream: - deserialized = response.iter_bytes() + deserialized = response.iter_bytes() if _decompress else response.iter_raw() else: deserialized = _deserialize(_models1.SearchIndexerSkillset, response.json()) @@ -5908,6 +5532,7 @@ def get_skillset(self, name: str, **kwargs: Any) -> _models1.SearchIndexerSkills } _request.url = self._client.format_url(_request.url, **path_format_arguments) + _decompress = kwargs.pop("decompress", True) _stream = kwargs.pop("stream", False) pipeline_response: PipelineResponse = self._client._pipeline.run( # pylint: disable=protected-access 
_request, stream=_stream, **kwargs @@ -5929,7 +5554,7 @@ def get_skillset(self, name: str, **kwargs: Any) -> _models1.SearchIndexerSkills raise HttpResponseError(response=response, model=error) if _stream: - deserialized = response.iter_bytes() + deserialized = response.iter_bytes() if _decompress else response.iter_raw() else: deserialized = _deserialize(_models1.SearchIndexerSkillset, response.json()) @@ -5976,6 +5601,7 @@ def _get_skillsets( } _request.url = self._client.format_url(_request.url, **path_format_arguments) + _decompress = kwargs.pop("decompress", True) _stream = kwargs.pop("stream", False) pipeline_response: PipelineResponse = self._client._pipeline.run( # pylint: disable=protected-access _request, stream=_stream, **kwargs @@ -5997,7 +5623,7 @@ def _get_skillsets( raise HttpResponseError(response=response, model=error) if _stream: - deserialized = response.iter_bytes() + deserialized = response.iter_bytes() if _decompress else response.iter_raw() else: deserialized = _deserialize( _models1._models.ListSkillsetsResult, response.json() # pylint: disable=protected-access @@ -6106,6 +5732,7 @@ def create_skillset( } _request.url = self._client.format_url(_request.url, **path_format_arguments) + _decompress = kwargs.pop("decompress", True) _stream = kwargs.pop("stream", False) pipeline_response: PipelineResponse = self._client._pipeline.run( # pylint: disable=protected-access _request, stream=_stream, **kwargs @@ -6127,7 +5754,7 @@ def create_skillset( raise HttpResponseError(response=response, model=error) if _stream: - deserialized = response.iter_bytes() + deserialized = response.iter_bytes() if _decompress else response.iter_raw() else: deserialized = _deserialize(_models1.SearchIndexerSkillset, response.json()) @@ -6135,83 +5762,3 @@ def create_skillset( return cls(pipeline_response, deserialized, {}) # type: ignore return deserialized # type: ignore - - @overload - def _reset_skills( - self, name: str, skill_names: _models1.SkillNames, *, 
content_type: str = "application/json", **kwargs: Any - ) -> None: ... - @overload - def _reset_skills( - self, name: str, skill_names: JSON, *, content_type: str = "application/json", **kwargs: Any - ) -> None: ... - @overload - def _reset_skills( - self, name: str, skill_names: IO[bytes], *, content_type: str = "application/json", **kwargs: Any - ) -> None: ... - - @distributed_trace - def _reset_skills( # pylint: disable=inconsistent-return-statements - self, name: str, skill_names: Union[_models1.SkillNames, JSON, IO[bytes]], **kwargs: Any - ) -> None: - """Reset an existing skillset in a search service. - - :param name: The name of the skillset. Required. - :type name: str - :param skill_names: The names of the skills to reset. If not specified, all skills in the - skillset will be reset. Is one of the following types: SkillNames, JSON, IO[bytes] Required. - :type skill_names: ~azure.search.documents.indexes.models.SkillNames or JSON or IO[bytes] - :return: None - :rtype: None - :raises ~azure.core.exceptions.HttpResponseError: - """ - error_map: MutableMapping = { - 401: ClientAuthenticationError, - 404: ResourceNotFoundError, - 409: ResourceExistsError, - 304: ResourceNotModifiedError, - } - error_map.update(kwargs.pop("error_map", {}) or {}) - - _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) - _params = kwargs.pop("params", {}) or {} - - content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None)) - cls: ClsType[None] = kwargs.pop("cls", None) - - content_type = content_type or "application/json" - _content = None - if isinstance(skill_names, (IOBase, bytes)): - _content = skill_names - else: - _content = json.dumps(skill_names, cls=SdkJSONEncoder, exclude_readonly=True) # type: ignore - - _request = build_search_indexer_reset_skills_request( - name=name, - content_type=content_type, - api_version=self._config.api_version, - content=_content, - headers=_headers, - params=_params, - ) - path_format_arguments 
= { - "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str", skip_quote=True), - } - _request.url = self._client.format_url(_request.url, **path_format_arguments) - - _stream = False - pipeline_response: PipelineResponse = self._client._pipeline.run( # pylint: disable=protected-access - _request, stream=_stream, **kwargs - ) - - response = pipeline_response.http_response - - if response.status_code not in [204]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - error = _failsafe_deserialize( - _models2.ErrorResponse, - response, - ) - raise HttpResponseError(response=response, model=error) - - if cls: - return cls(pipeline_response, None, {}) # type: ignore diff --git a/sdk/search/azure-search-documents/azure/search/documents/indexes/_operations/_patch.py b/sdk/search/azure-search-documents/azure/search/documents/indexes/_operations/_patch.py index 139fb1ad179b..5ec918657b5d 100644 --- a/sdk/search/azure-search-documents/azure/search/documents/indexes/_operations/_patch.py +++ b/sdk/search/azure-search-documents/azure/search/documents/indexes/_operations/_patch.py @@ -50,8 +50,6 @@ def _convert_index_response(response: _SearchIndexResponse) -> _models.SearchInd similarity=response.similarity, semantic_search=response.semantic, vector_search=response.vector_search, - permission_filter_option=response.permission_filter_option, - purview_enabled=response.purview_enabled, e_tag=response.e_tag, ) @@ -534,7 +532,6 @@ def create_or_update_data_source_connection( self, data_source_connection: _models.SearchIndexerDataSourceConnection, *, - skip_indexer_reset_requirement_for_cache: Optional[bool] = None, match_condition: MatchConditions = MatchConditions.Unconditionally, **kwargs: Any, ) -> _models.SearchIndexerDataSourceConnection: @@ -542,9 +539,6 @@ def create_or_update_data_source_connection( :param data_source_connection: The SearchIndexerDataSourceConnection object to create or update. Required. 
:type data_source_connection: ~azure.search.documents.indexes.models.SearchIndexerDataSourceConnection - :keyword skip_indexer_reset_requirement_for_cache: Ignores cache reset requirements. Default - value is None. - :paramtype skip_indexer_reset_requirement_for_cache: bool :keyword match_condition: The match condition to use upon the etag. Default value is None. :paramtype match_condition: ~azure.core.MatchConditions :return: SearchIndexerDataSourceConnection @@ -557,7 +551,6 @@ def create_or_update_data_source_connection( prefer="return=representation", etag=data_source_connection.e_tag, match_condition=match_condition, - skip_indexer_reset_requirement_for_cache=skip_indexer_reset_requirement_for_cache, **kwargs, ) @@ -599,8 +592,6 @@ def create_or_update_indexer( self, indexer: _models.SearchIndexer, *, - skip_indexer_reset_requirement_for_cache: Optional[bool] = None, - disable_cache_reprocessing_change_detection: Optional[bool] = None, match_condition: MatchConditions = MatchConditions.Unconditionally, **kwargs: Any, ) -> _models.SearchIndexer: @@ -608,12 +599,6 @@ def create_or_update_indexer( :param indexer: The SearchIndexer object to create or update. Required. :type indexer: ~azure.search.documents.indexes.models.SearchIndexer - :keyword skip_indexer_reset_requirement_for_cache: Ignores cache reset requirements. Default - value is None. - :paramtype skip_indexer_reset_requirement_for_cache: bool - :keyword disable_cache_reprocessing_change_detection: Disables cache reprocessing change - detection. Default value is None. - :paramtype disable_cache_reprocessing_change_detection: bool :keyword match_condition: The match condition to use upon the etag. Default value is None. 
:paramtype match_condition: ~azure.core.MatchConditions :return: SearchIndexer @@ -626,8 +611,6 @@ def create_or_update_indexer( prefer="return=representation", etag=indexer.e_tag, match_condition=match_condition, - skip_indexer_reset_requirement_for_cache=skip_indexer_reset_requirement_for_cache, - disable_cache_reprocessing_change_detection=disable_cache_reprocessing_change_detection, **kwargs, ) @@ -669,8 +652,6 @@ def create_or_update_skillset( self, skillset: _models.SearchIndexerSkillset, *, - skip_indexer_reset_requirement_for_cache: Optional[bool] = None, - disable_cache_reprocessing_change_detection: Optional[bool] = None, match_condition: MatchConditions = MatchConditions.Unconditionally, **kwargs: Any, ) -> _models.SearchIndexerSkillset: @@ -678,12 +659,6 @@ def create_or_update_skillset( :param skillset: The SearchIndexerSkillset object to create or update. Required. :type skillset: ~azure.search.documents.indexes.models.SearchIndexerSkillset - :keyword skip_indexer_reset_requirement_for_cache: Ignores cache reset requirements. Default - value is None. - :paramtype skip_indexer_reset_requirement_for_cache: bool - :keyword disable_cache_reprocessing_change_detection: Disables cache reprocessing change - detection. Default value is None. - :paramtype disable_cache_reprocessing_change_detection: bool :keyword match_condition: The match condition to use upon the etag. Default value is None. 
:paramtype match_condition: ~azure.core.MatchConditions :return: SearchIndexerSkillset @@ -696,8 +671,6 @@ def create_or_update_skillset( prefer="return=representation", etag=skillset.e_tag, match_condition=match_condition, - skip_indexer_reset_requirement_for_cache=skip_indexer_reset_requirement_for_cache, - disable_cache_reprocessing_change_detection=disable_cache_reprocessing_change_detection, **kwargs, ) @@ -801,94 +774,6 @@ def get_skillset_names(self, **kwargs: Any) -> List[str]: result = self.get_skillsets(**kwargs) return [x.name for x in result] - @distributed_trace - def reset_documents( - self, - indexer: Union[str, _models.SearchIndexer], - keys_or_ids: _models.DocumentKeysOrIds, - *, - overwrite: bool = False, - **kwargs: Any, - ) -> None: - """Resets specific documents in the datasource to be selectively re-ingested by the indexer. - - :param indexer: The indexer to reset documents for. Can be the indexer name or a SearchIndexer object. - :type indexer: str or ~azure.search.documents.indexes.models.SearchIndexer - :param keys_or_ids: The document keys or ids to reset. - :type keys_or_ids: ~azure.search.documents.indexes.models.DocumentKeysOrIds - :keyword overwrite: If false, keys or ids will be appended to existing ones. If true, only the - keys or ids in this payload will be queued to be re-ingested. Default value is False. - :paramtype overwrite: bool - :return: None - :rtype: None - :raises ~azure.core.exceptions.HttpResponseError: - """ - try: - name: str = indexer.name # type: ignore - except AttributeError: - name = indexer # type: ignore - return self._reset_documents( - name=name, - keys_or_ids=keys_or_ids, - overwrite=overwrite, - **kwargs, - ) - - @distributed_trace - def reset_skills( - self, - skillset: Union[str, _models.SearchIndexerSkillset], - skill_names: List[str], - **kwargs: Any, - ) -> None: - """Reset an existing skillset in a search service. - - :param skillset: The skillset to reset skills for. 
Can be the skillset name or a SearchIndexerSkillset object. - :type skillset: str or ~azure.search.documents.indexes.models.SearchIndexerSkillset - :param skill_names: The names of the skills to reset. - :type skill_names: list[str] - :return: None - :rtype: None - :raises ~azure.core.exceptions.HttpResponseError: - """ - try: - name: str = skillset.name # type: ignore - except AttributeError: - name = skillset # type: ignore - return self._reset_skills( - name=name, - skill_names=_models.SkillNames(skill_names=skill_names), - **kwargs, - ) - - @distributed_trace - def resync( - self, - indexer: Union[str, _models.SearchIndexer], - indexer_resync_options: List[Union[str, _models.IndexerResyncOption]], - **kwargs: Any, - ) -> None: - """Resync selective options from the datasource to be re-ingested by the indexer. - - :param indexer: The indexer to resync. Can be the indexer name or a SearchIndexer object. - :type indexer: str or ~azure.search.documents.indexes.models.SearchIndexer - :param indexer_resync_options: Re-sync options that have been pre-defined from data source. 
- :type indexer_resync_options: list[str or ~azure.search.documents.indexes.models.IndexerResyncOption] - :return: None - :rtype: None - :raises ~azure.core.exceptions.HttpResponseError: - """ - try: - name: str = indexer.name # type: ignore - except AttributeError: - name = indexer # type: ignore - indexer_resync = _models.IndexerResyncBody(options=indexer_resync_options) - return self._resync( - name=name, - indexer_resync=indexer_resync, - **kwargs, - ) - __all__: list[str] = [ "_SearchIndexClientOperationsMixin", diff --git a/sdk/search/azure-search-documents/azure/search/documents/indexes/_utils/model_base.py b/sdk/search/azure-search-documents/azure/search/documents/indexes/_utils/model_base.py index c402af2afc63..f8511ab0c707 100644 --- a/sdk/search/azure-search-documents/azure/search/documents/indexes/_utils/model_base.py +++ b/sdk/search/azure-search-documents/azure/search/documents/indexes/_utils/model_base.py @@ -22,7 +22,7 @@ from datetime import datetime, date, time, timedelta, timezone from json import JSONEncoder import xml.etree.ElementTree as ET -from collections.abc import MutableMapping +from collections.abc import MutableMapping # pylint: disable=import-error from typing_extensions import Self import isodate from azure.core.exceptions import DeserializationError @@ -515,6 +515,8 @@ def setdefault(self, key: str, default: typing.Any = _UNSET) -> typing.Any: return self._data.setdefault(key, default) def __eq__(self, other: typing.Any) -> bool: + if isinstance(other, _MyMutableMapping): + return self._data == other._data try: other_model = self.__class__(other) except Exception: @@ -628,6 +630,9 @@ def __init__(self, *args: typing.Any, **kwargs: typing.Any) -> None: if len(items) > 0: existed_attr_keys.append(xml_name) dict_to_pass[rf._rest_name] = _deserialize(rf._type, items) + elif not rf._is_optional: + existed_attr_keys.append(xml_name) + dict_to_pass[rf._rest_name] = [] continue # text element is primitive type @@ -690,7 +695,7 @@ def 
__new__(cls, *args: typing.Any, **kwargs: typing.Any) -> Self: cls._attr_to_rest_field: dict[str, _RestField] = dict(attr_to_rest_field.items()) cls._calculated.add(f"{cls.__module__}.{cls.__qualname__}") - return super().__new__(cls) + return super().__new__(cls) # pylint: disable=no-value-for-parameter def __init_subclass__(cls, discriminator: typing.Optional[str] = None) -> None: for base in cls.__bases__: @@ -889,6 +894,8 @@ def _get_deserialize_callable_from_annotation( # pylint: disable=too-many-retur # is it optional? try: if any(a is _NONE_TYPE for a in annotation.__args__): # pyright: ignore + if rf: + rf._is_optional = True if len(annotation.__args__) <= 2: # pyright: ignore if_obj_deserializer = _get_deserialize_callable_from_annotation( next(a for a in annotation.__args__ if a is not _NONE_TYPE), module, rf # pyright: ignore @@ -981,16 +988,20 @@ def _deserialize_with_callable( return float(value.text) if value.text else None if deserializer is bool: return value.text == "true" if value.text else None + if deserializer and deserializer in _DESERIALIZE_MAPPING.values(): + return deserializer(value.text) if value.text else None + if deserializer and deserializer in _DESERIALIZE_MAPPING_WITHFORMAT.values(): + return deserializer(value.text) if value.text else None if deserializer is None: return value if deserializer in [int, float, bool]: return deserializer(value) if isinstance(deserializer, CaseInsensitiveEnumMeta): try: - return deserializer(value) + return deserializer(value.text if isinstance(value, ET.Element) else value) except ValueError: # for unknown value, return raw value - return value + return value.text if isinstance(value, ET.Element) else value if isinstance(deserializer, type) and issubclass(deserializer, Model): return deserializer._deserialize(value, []) return typing.cast(typing.Callable[[typing.Any], typing.Any], deserializer)(value) @@ -1043,6 +1054,7 @@ def _failsafe_deserialize_xml( return None +# pylint: 
disable=too-many-instance-attributes class _RestField: def __init__( self, @@ -1062,6 +1074,7 @@ def __init__( self._is_discriminator = is_discriminator self._visibility = visibility self._is_model = False + self._is_optional = False self._default = default self._format = format self._is_multipart_file_input = is_multipart_file_input diff --git a/sdk/search/azure-search-documents/azure/search/documents/indexes/aio/_client.py b/sdk/search/azure-search-documents/azure/search/documents/indexes/aio/_client.py index abfade0135cc..c0178771756b 100644 --- a/sdk/search/azure-search-documents/azure/search/documents/indexes/aio/_client.py +++ b/sdk/search/azure-search-documents/azure/search/documents/indexes/aio/_client.py @@ -26,15 +26,15 @@ class SearchIndexClient(_SearchIndexClientOperationsMixin): """SearchIndexClient. - :param endpoint: Service host. Required. + :param endpoint: The endpoint URL of the search service. Required. :type endpoint: str :param credential: Credential used to authenticate requests to the service. Is either a key credential type or a token credential type. Required. :type credential: ~azure.core.credentials.AzureKeyCredential or ~azure.core.credentials_async.AsyncTokenCredential - :keyword api_version: The API version to use for this operation. Known values are - "2025-11-01-preview" and None. Default value is "2025-11-01-preview". Note that overriding this - default value may result in unsupported behavior. + :keyword api_version: The API version to use for this operation. Known values are "2026-04-01" + and None. Default value is "2026-04-01". Note that overriding this default value may result in + unsupported behavior. :paramtype api_version: str """ @@ -109,15 +109,15 @@ async def __aexit__(self, *exc_details: Any) -> None: class SearchIndexerClient(_SearchIndexerClientOperationsMixin): """SearchIndexerClient. - :param endpoint: Service host. Required. + :param endpoint: The endpoint URL of the search service. Required. 
:type endpoint: str :param credential: Credential used to authenticate requests to the service. Is either a key credential type or a token credential type. Required. :type credential: ~azure.core.credentials.AzureKeyCredential or ~azure.core.credentials_async.AsyncTokenCredential - :keyword api_version: The API version to use for this operation. Known values are - "2025-11-01-preview" and None. Default value is "2025-11-01-preview". Note that overriding this - default value may result in unsupported behavior. + :keyword api_version: The API version to use for this operation. Known values are "2026-04-01" + and None. Default value is "2026-04-01". Note that overriding this default value may result in + unsupported behavior. :paramtype api_version: str """ diff --git a/sdk/search/azure-search-documents/azure/search/documents/indexes/aio/_configuration.py b/sdk/search/azure-search-documents/azure/search/documents/indexes/aio/_configuration.py index ecf97247abc0..bc0628309b08 100644 --- a/sdk/search/azure-search-documents/azure/search/documents/indexes/aio/_configuration.py +++ b/sdk/search/azure-search-documents/azure/search/documents/indexes/aio/_configuration.py @@ -23,22 +23,22 @@ class SearchIndexClientConfiguration: # pylint: disable=too-many-instance-attri Note that all parameters used to create this instance are saved as instance attributes. - :param endpoint: Service host. Required. + :param endpoint: The endpoint URL of the search service. Required. :type endpoint: str :param credential: Credential used to authenticate requests to the service. Is either a key credential type or a token credential type. Required. :type credential: ~azure.core.credentials.AzureKeyCredential or ~azure.core.credentials_async.AsyncTokenCredential - :keyword api_version: The API version to use for this operation. Known values are - "2025-11-01-preview" and None. Default value is "2025-11-01-preview". Note that overriding this - default value may result in unsupported behavior. 
+ :keyword api_version: The API version to use for this operation. Known values are "2026-04-01" + and None. Default value is "2026-04-01". Note that overriding this default value may result in + unsupported behavior. :paramtype api_version: str """ def __init__( self, endpoint: str, credential: Union[AzureKeyCredential, "AsyncTokenCredential"], **kwargs: Any ) -> None: - api_version: str = kwargs.pop("api_version", "2025-11-01-preview") + api_version: str = kwargs.pop("api_version", "2026-04-01") if endpoint is None: raise ValueError("Parameter 'endpoint' must not be None.") @@ -80,22 +80,22 @@ class SearchIndexerClientConfiguration: # pylint: disable=too-many-instance-att Note that all parameters used to create this instance are saved as instance attributes. - :param endpoint: Service host. Required. + :param endpoint: The endpoint URL of the search service. Required. :type endpoint: str :param credential: Credential used to authenticate requests to the service. Is either a key credential type or a token credential type. Required. :type credential: ~azure.core.credentials.AzureKeyCredential or ~azure.core.credentials_async.AsyncTokenCredential - :keyword api_version: The API version to use for this operation. Known values are - "2025-11-01-preview" and None. Default value is "2025-11-01-preview". Note that overriding this - default value may result in unsupported behavior. + :keyword api_version: The API version to use for this operation. Known values are "2026-04-01" + and None. Default value is "2026-04-01". Note that overriding this default value may result in + unsupported behavior. 
:paramtype api_version: str """ def __init__( self, endpoint: str, credential: Union[AzureKeyCredential, "AsyncTokenCredential"], **kwargs: Any ) -> None: - api_version: str = kwargs.pop("api_version", "2025-11-01-preview") + api_version: str = kwargs.pop("api_version", "2026-04-01") if endpoint is None: raise ValueError("Parameter 'endpoint' must not be None.") diff --git a/sdk/search/azure-search-documents/azure/search/documents/indexes/aio/_operations/_operations.py b/sdk/search/azure-search-documents/azure/search/documents/indexes/aio/_operations/_operations.py index fe204de37c42..2283f5c07552 100644 --- a/sdk/search/azure-search-documents/azure/search/documents/indexes/aio/_operations/_operations.py +++ b/sdk/search/azure-search-documents/azure/search/documents/indexes/aio/_operations/_operations.py @@ -6,7 +6,7 @@ # Code generated by Microsoft (R) Python Code Generator. # Changes may cause incorrect behavior and will be lost if the code is regenerated. # -------------------------------------------------------------------------- -from collections.abc import MutableMapping +from collections.abc import MutableMapping # pylint: disable=import-error from io import IOBase import json from typing import Any, Callable, IO, Literal, Optional, TypeVar, Union, overload @@ -63,7 +63,6 @@ build_search_index_get_synonym_map_request, build_search_index_get_synonym_maps_request, build_search_index_list_aliases_request, - build_search_index_list_index_stats_summary_request, build_search_index_list_indexes_request, build_search_index_list_indexes_with_selected_properties_request, build_search_index_list_knowledge_bases_request, @@ -84,10 +83,7 @@ build_search_indexer_get_indexers_request, build_search_indexer_get_skillset_request, build_search_indexer_get_skillsets_request, - build_search_indexer_reset_documents_request, build_search_indexer_reset_indexer_request, - build_search_indexer_reset_skills_request, - build_search_indexer_resync_request, 
build_search_indexer_run_indexer_request, ) from .._configuration import SearchIndexClientConfiguration, SearchIndexerClientConfiguration @@ -205,6 +201,7 @@ async def _create_or_update_synonym_map( } _request.url = self._client.format_url(_request.url, **path_format_arguments) + _decompress = kwargs.pop("decompress", True) _stream = kwargs.pop("stream", False) pipeline_response: PipelineResponse = await self._client._pipeline.run( # type: ignore # pylint: disable=protected-access _request, stream=_stream, **kwargs @@ -226,7 +223,7 @@ async def _create_or_update_synonym_map( raise HttpResponseError(response=response, model=error) if _stream: - deserialized = response.iter_bytes() + deserialized = response.iter_bytes() if _decompress else response.iter_raw() else: deserialized = _deserialize(_models2.SynonymMap, response.json()) @@ -336,6 +333,7 @@ async def get_synonym_map(self, name: str, **kwargs: Any) -> _models2.SynonymMap } _request.url = self._client.format_url(_request.url, **path_format_arguments) + _decompress = kwargs.pop("decompress", True) _stream = kwargs.pop("stream", False) pipeline_response: PipelineResponse = await self._client._pipeline.run( # type: ignore # pylint: disable=protected-access _request, stream=_stream, **kwargs @@ -357,7 +355,7 @@ async def get_synonym_map(self, name: str, **kwargs: Any) -> _models2.SynonymMap raise HttpResponseError(response=response, model=error) if _stream: - deserialized = response.iter_bytes() + deserialized = response.iter_bytes() if _decompress else response.iter_raw() else: deserialized = _deserialize(_models2.SynonymMap, response.json()) @@ -404,6 +402,7 @@ async def _get_synonym_maps( } _request.url = self._client.format_url(_request.url, **path_format_arguments) + _decompress = kwargs.pop("decompress", True) _stream = kwargs.pop("stream", False) pipeline_response: PipelineResponse = await self._client._pipeline.run( # type: ignore # pylint: disable=protected-access _request, stream=_stream, **kwargs @@ 
-425,7 +424,7 @@ async def _get_synonym_maps( raise HttpResponseError(response=response, model=error) if _stream: - deserialized = response.iter_bytes() + deserialized = response.iter_bytes() if _decompress else response.iter_raw() else: deserialized = _deserialize( _models2._models.ListSynonymMapsResult, response.json() # pylint: disable=protected-access @@ -530,6 +529,7 @@ async def create_synonym_map( } _request.url = self._client.format_url(_request.url, **path_format_arguments) + _decompress = kwargs.pop("decompress", True) _stream = kwargs.pop("stream", False) pipeline_response: PipelineResponse = await self._client._pipeline.run( # type: ignore # pylint: disable=protected-access _request, stream=_stream, **kwargs @@ -551,7 +551,7 @@ async def create_synonym_map( raise HttpResponseError(response=response, model=error) if _stream: - deserialized = response.iter_bytes() + deserialized = response.iter_bytes() if _decompress else response.iter_raw() else: deserialized = _deserialize(_models2.SynonymMap, response.json()) @@ -675,6 +675,7 @@ async def _create_or_update_index( } _request.url = self._client.format_url(_request.url, **path_format_arguments) + _decompress = kwargs.pop("decompress", True) _stream = kwargs.pop("stream", False) pipeline_response: PipelineResponse = await self._client._pipeline.run( # type: ignore # pylint: disable=protected-access _request, stream=_stream, **kwargs @@ -696,7 +697,7 @@ async def _create_or_update_index( raise HttpResponseError(response=response, model=error) if _stream: - deserialized = response.iter_bytes() + deserialized = response.iter_bytes() if _decompress else response.iter_raw() else: deserialized = _deserialize(_models2.SearchIndex, response.json()) @@ -808,6 +809,7 @@ async def get_index(self, name: str, **kwargs: Any) -> _models2.SearchIndex: } _request.url = self._client.format_url(_request.url, **path_format_arguments) + _decompress = kwargs.pop("decompress", True) _stream = kwargs.pop("stream", False) 
pipeline_response: PipelineResponse = await self._client._pipeline.run( # type: ignore # pylint: disable=protected-access _request, stream=_stream, **kwargs @@ -829,7 +831,7 @@ async def get_index(self, name: str, **kwargs: Any) -> _models2.SearchIndex: raise HttpResponseError(response=response, model=error) if _stream: - deserialized = response.iter_bytes() + deserialized = response.iter_bytes() if _decompress else response.iter_raw() else: deserialized = _deserialize(_models2.SearchIndex, response.json()) @@ -899,7 +901,10 @@ def prepare_request(next_link=None): async def extract_data(pipeline_response): deserialized = pipeline_response.http_response.json() - list_of_elem = _deserialize(list[_models2.SearchIndex], deserialized.get("value", [])) + list_of_elem = _deserialize( + list[_models2.SearchIndex], + deserialized.get("value", []), + ) if cls: list_of_elem = cls(list_of_elem) # type: ignore return None, AsyncList(list_of_elem) @@ -992,9 +997,11 @@ def prepare_request(next_link=None): return _request async def extract_data(pipeline_response): - # pylint: disable=protected-access deserialized = pipeline_response.http_response.json() - list_of_elem = _deserialize(list[_models2._models.SearchIndexResponse], deserialized.get("value", [])) + list_of_elem = _deserialize( + list[_models2._models.SearchIndexResponse], # pylint: disable=protected-access + deserialized.get("value", []), + ) if cls: list_of_elem = cls(list_of_elem) # type: ignore return None, AsyncList(list_of_elem) @@ -1114,6 +1121,7 @@ async def create_index( } _request.url = self._client.format_url(_request.url, **path_format_arguments) + _decompress = kwargs.pop("decompress", True) _stream = kwargs.pop("stream", False) pipeline_response: PipelineResponse = await self._client._pipeline.run( # type: ignore # pylint: disable=protected-access _request, stream=_stream, **kwargs @@ -1135,7 +1143,7 @@ async def create_index( raise HttpResponseError(response=response, model=error) if _stream: - deserialized 
= response.iter_bytes() + deserialized = response.iter_bytes() if _decompress else response.iter_raw() else: deserialized = _deserialize(_models2.SearchIndex, response.json()) @@ -1179,6 +1187,7 @@ async def _get_index_statistics(self, name: str, **kwargs: Any) -> _models2.GetI } _request.url = self._client.format_url(_request.url, **path_format_arguments) + _decompress = kwargs.pop("decompress", True) _stream = kwargs.pop("stream", False) pipeline_response: PipelineResponse = await self._client._pipeline.run( # type: ignore # pylint: disable=protected-access _request, stream=_stream, **kwargs @@ -1200,7 +1209,7 @@ async def _get_index_statistics(self, name: str, **kwargs: Any) -> _models2.GetI raise HttpResponseError(response=response, model=error) if _stream: - deserialized = response.iter_bytes() + deserialized = response.iter_bytes() if _decompress else response.iter_raw() else: deserialized = _deserialize(_models2.GetIndexStatisticsResult, response.json()) @@ -1271,6 +1280,7 @@ async def _analyze_text( } _request.url = self._client.format_url(_request.url, **path_format_arguments) + _decompress = kwargs.pop("decompress", True) _stream = kwargs.pop("stream", False) pipeline_response: PipelineResponse = await self._client._pipeline.run( # type: ignore # pylint: disable=protected-access _request, stream=_stream, **kwargs @@ -1292,7 +1302,7 @@ async def _analyze_text( raise HttpResponseError(response=response, model=error) if _stream: - deserialized = response.iter_bytes() + deserialized = response.iter_bytes() if _decompress else response.iter_raw() else: deserialized = _deserialize(_models2.AnalyzeResult, response.json()) @@ -1405,6 +1415,7 @@ async def _create_or_update_alias( } _request.url = self._client.format_url(_request.url, **path_format_arguments) + _decompress = kwargs.pop("decompress", True) _stream = kwargs.pop("stream", False) pipeline_response: PipelineResponse = await self._client._pipeline.run( # type: ignore # pylint: disable=protected-access 
_request, stream=_stream, **kwargs @@ -1426,7 +1437,7 @@ async def _create_or_update_alias( raise HttpResponseError(response=response, model=error) if _stream: - deserialized = response.iter_bytes() + deserialized = response.iter_bytes() if _decompress else response.iter_raw() else: deserialized = _deserialize(_models2.SearchAlias, response.json()) @@ -1537,6 +1548,7 @@ async def get_alias(self, name: str, **kwargs: Any) -> _models2.SearchAlias: } _request.url = self._client.format_url(_request.url, **path_format_arguments) + _decompress = kwargs.pop("decompress", True) _stream = kwargs.pop("stream", False) pipeline_response: PipelineResponse = await self._client._pipeline.run( # type: ignore # pylint: disable=protected-access _request, stream=_stream, **kwargs @@ -1558,7 +1570,7 @@ async def get_alias(self, name: str, **kwargs: Any) -> _models2.SearchAlias: raise HttpResponseError(response=response, model=error) if _stream: - deserialized = response.iter_bytes() + deserialized = response.iter_bytes() if _decompress else response.iter_raw() else: deserialized = _deserialize(_models2.SearchAlias, response.json()) @@ -1628,7 +1640,10 @@ def prepare_request(next_link=None): async def extract_data(pipeline_response): deserialized = pipeline_response.http_response.json() - list_of_elem = _deserialize(list[_models2.SearchAlias], deserialized.get("value", [])) + list_of_elem = _deserialize( + list[_models2.SearchAlias], + deserialized.get("value", []), + ) if cls: list_of_elem = cls(list_of_elem) # type: ignore return None, AsyncList(list_of_elem) @@ -1748,6 +1763,7 @@ async def create_alias( } _request.url = self._client.format_url(_request.url, **path_format_arguments) + _decompress = kwargs.pop("decompress", True) _stream = kwargs.pop("stream", False) pipeline_response: PipelineResponse = await self._client._pipeline.run( # type: ignore # pylint: disable=protected-access _request, stream=_stream, **kwargs @@ -1769,7 +1785,7 @@ async def create_alias( raise 
HttpResponseError(response=response, model=error) if _stream: - deserialized = response.iter_bytes() + deserialized = response.iter_bytes() if _decompress else response.iter_raw() else: deserialized = _deserialize(_models2.SearchAlias, response.json()) @@ -1882,6 +1898,7 @@ async def _create_or_update_knowledge_base( } _request.url = self._client.format_url(_request.url, **path_format_arguments) + _decompress = kwargs.pop("decompress", True) _stream = kwargs.pop("stream", False) pipeline_response: PipelineResponse = await self._client._pipeline.run( # type: ignore # pylint: disable=protected-access _request, stream=_stream, **kwargs @@ -1903,7 +1920,7 @@ async def _create_or_update_knowledge_base( raise HttpResponseError(response=response, model=error) if _stream: - deserialized = response.iter_bytes() + deserialized = response.iter_bytes() if _decompress else response.iter_raw() else: deserialized = _deserialize(_models2.KnowledgeBase, response.json()) @@ -2013,6 +2030,7 @@ async def get_knowledge_base(self, name: str, **kwargs: Any) -> _models2.Knowled } _request.url = self._client.format_url(_request.url, **path_format_arguments) + _decompress = kwargs.pop("decompress", True) _stream = kwargs.pop("stream", False) pipeline_response: PipelineResponse = await self._client._pipeline.run( # type: ignore # pylint: disable=protected-access _request, stream=_stream, **kwargs @@ -2034,7 +2052,7 @@ async def get_knowledge_base(self, name: str, **kwargs: Any) -> _models2.Knowled raise HttpResponseError(response=response, model=error) if _stream: - deserialized = response.iter_bytes() + deserialized = response.iter_bytes() if _decompress else response.iter_raw() else: deserialized = _deserialize(_models2.KnowledgeBase, response.json()) @@ -2104,7 +2122,10 @@ def prepare_request(next_link=None): async def extract_data(pipeline_response): deserialized = pipeline_response.http_response.json() - list_of_elem = _deserialize(list[_models2.KnowledgeBase], deserialized.get("value", 
[])) + list_of_elem = _deserialize( + list[_models2.KnowledgeBase], + deserialized.get("value", []), + ) if cls: list_of_elem = cls(list_of_elem) # type: ignore return None, AsyncList(list_of_elem) @@ -2224,6 +2245,7 @@ async def create_knowledge_base( } _request.url = self._client.format_url(_request.url, **path_format_arguments) + _decompress = kwargs.pop("decompress", True) _stream = kwargs.pop("stream", False) pipeline_response: PipelineResponse = await self._client._pipeline.run( # type: ignore # pylint: disable=protected-access _request, stream=_stream, **kwargs @@ -2245,7 +2267,7 @@ async def create_knowledge_base( raise HttpResponseError(response=response, model=error) if _stream: - deserialized = response.iter_bytes() + deserialized = response.iter_bytes() if _decompress else response.iter_raw() else: deserialized = _deserialize(_models2.KnowledgeBase, response.json()) @@ -2359,6 +2381,7 @@ async def _create_or_update_knowledge_source( } _request.url = self._client.format_url(_request.url, **path_format_arguments) + _decompress = kwargs.pop("decompress", True) _stream = kwargs.pop("stream", False) pipeline_response: PipelineResponse = await self._client._pipeline.run( # type: ignore # pylint: disable=protected-access _request, stream=_stream, **kwargs @@ -2380,7 +2403,7 @@ async def _create_or_update_knowledge_source( raise HttpResponseError(response=response, model=error) if _stream: - deserialized = response.iter_bytes() + deserialized = response.iter_bytes() if _decompress else response.iter_raw() else: deserialized = _deserialize(_models2.KnowledgeSource, response.json()) @@ -2490,6 +2513,7 @@ async def get_knowledge_source(self, name: str, **kwargs: Any) -> _models2.Knowl } _request.url = self._client.format_url(_request.url, **path_format_arguments) + _decompress = kwargs.pop("decompress", True) _stream = kwargs.pop("stream", False) pipeline_response: PipelineResponse = await self._client._pipeline.run( # type: ignore # pylint: 
disable=protected-access _request, stream=_stream, **kwargs @@ -2511,7 +2535,7 @@ async def get_knowledge_source(self, name: str, **kwargs: Any) -> _models2.Knowl raise HttpResponseError(response=response, model=error) if _stream: - deserialized = response.iter_bytes() + deserialized = response.iter_bytes() if _decompress else response.iter_raw() else: deserialized = _deserialize(_models2.KnowledgeSource, response.json()) @@ -2581,7 +2605,10 @@ def prepare_request(next_link=None): async def extract_data(pipeline_response): deserialized = pipeline_response.http_response.json() - list_of_elem = _deserialize(list[_models2.KnowledgeSource], deserialized.get("value", [])) + list_of_elem = _deserialize( + list[_models2.KnowledgeSource], + deserialized.get("value", []), + ) if cls: list_of_elem = cls(list_of_elem) # type: ignore return None, AsyncList(list_of_elem) @@ -2702,6 +2729,7 @@ async def create_knowledge_source( } _request.url = self._client.format_url(_request.url, **path_format_arguments) + _decompress = kwargs.pop("decompress", True) _stream = kwargs.pop("stream", False) pipeline_response: PipelineResponse = await self._client._pipeline.run( # type: ignore # pylint: disable=protected-access _request, stream=_stream, **kwargs @@ -2723,7 +2751,7 @@ async def create_knowledge_source( raise HttpResponseError(response=response, model=error) if _stream: - deserialized = response.iter_bytes() + deserialized = response.iter_bytes() if _decompress else response.iter_raw() else: deserialized = _deserialize(_models2.KnowledgeSource, response.json()) @@ -2768,6 +2796,7 @@ async def get_knowledge_source_status( } _request.url = self._client.format_url(_request.url, **path_format_arguments) + _decompress = kwargs.pop("decompress", True) _stream = kwargs.pop("stream", False) pipeline_response: PipelineResponse = await self._client._pipeline.run( # type: ignore # pylint: disable=protected-access _request, stream=_stream, **kwargs @@ -2789,7 +2818,7 @@ async def 
get_knowledge_source_status( raise HttpResponseError(response=response, model=error) if _stream: - deserialized = response.iter_bytes() + deserialized = response.iter_bytes() if _decompress else response.iter_raw() else: deserialized = _deserialize(_knowledgebases_models4.KnowledgeSourceStatus, response.json()) @@ -2829,6 +2858,7 @@ async def get_service_statistics(self, **kwargs: Any) -> _models2.SearchServiceS } _request.url = self._client.format_url(_request.url, **path_format_arguments) + _decompress = kwargs.pop("decompress", True) _stream = kwargs.pop("stream", False) pipeline_response: PipelineResponse = await self._client._pipeline.run( # type: ignore # pylint: disable=protected-access _request, stream=_stream, **kwargs @@ -2850,7 +2880,7 @@ async def get_service_statistics(self, **kwargs: Any) -> _models2.SearchServiceS raise HttpResponseError(response=response, model=error) if _stream: - deserialized = response.iter_bytes() + deserialized = response.iter_bytes() if _decompress else response.iter_raw() else: deserialized = _deserialize(_models2.SearchServiceStatistics, response.json()) @@ -2859,95 +2889,8 @@ async def get_service_statistics(self, **kwargs: Any) -> _models2.SearchServiceS return deserialized # type: ignore - @distributed_trace - def list_index_stats_summary(self, **kwargs: Any) -> AsyncItemPaged["_models2.IndexStatisticsSummary"]: - """Retrieves a summary of statistics for all indexes in the search service. 
- - :return: An iterator like instance of IndexStatisticsSummary - :rtype: - ~azure.core.async_paging.AsyncItemPaged[~azure.search.documents.indexes.models.IndexStatisticsSummary] - :raises ~azure.core.exceptions.HttpResponseError: - """ - _headers = kwargs.pop("headers", {}) or {} - _params = kwargs.pop("params", {}) or {} - - cls: ClsType[list[_models2.IndexStatisticsSummary]] = kwargs.pop("cls", None) - - error_map: MutableMapping = { - 401: ClientAuthenticationError, - 404: ResourceNotFoundError, - 409: ResourceExistsError, - 304: ResourceNotModifiedError, - } - error_map.update(kwargs.pop("error_map", {}) or {}) - - def prepare_request(next_link=None): - if not next_link: - _request = build_search_index_list_index_stats_summary_request( - api_version=self._config.api_version, - headers=_headers, - params=_params, - ) - path_format_arguments = { - "endpoint": self._serialize.url( - "self._config.endpoint", self._config.endpoint, "str", skip_quote=True - ), - } - _request.url = self._client.format_url(_request.url, **path_format_arguments) - - else: - # make call to next link with the client's api-version - _parsed_next_link = urllib.parse.urlparse(next_link) - _next_request_params = case_insensitive_dict( - { - key: [urllib.parse.quote(v) for v in value] - for key, value in urllib.parse.parse_qs(_parsed_next_link.query).items() - } - ) - _next_request_params["api-version"] = self._config.api_version - _request = HttpRequest( - "GET", urllib.parse.urljoin(next_link, _parsed_next_link.path), params=_next_request_params - ) - path_format_arguments = { - "endpoint": self._serialize.url( - "self._config.endpoint", self._config.endpoint, "str", skip_quote=True - ), - } - _request.url = self._client.format_url(_request.url, **path_format_arguments) - - return _request - - async def extract_data(pipeline_response): - deserialized = pipeline_response.http_response.json() - list_of_elem = _deserialize(list[_models2.IndexStatisticsSummary], deserialized.get("value", [])) 
- if cls: - list_of_elem = cls(list_of_elem) # type: ignore - return None, AsyncList(list_of_elem) - - async def get_next(next_link=None): - _request = prepare_request(next_link) - - _stream = False - pipeline_response: PipelineResponse = await self._client._pipeline.run( # type: ignore # pylint: disable=protected-access - _request, stream=_stream, **kwargs - ) - response = pipeline_response.http_response - - if response.status_code not in [200]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - error = _failsafe_deserialize( - _models3.ErrorResponse, - response, - ) - raise HttpResponseError(response=response, model=error) - - return pipeline_response - - return AsyncItemPaged(get_next, extract_data) - - -class _SearchIndexerClientOperationsMixin( # pylint: disable=too-many-public-methods +class _SearchIndexerClientOperationsMixin( ClientMixinABC[AsyncPipelineClient[HttpRequest, AsyncHttpResponse], SearchIndexerClientConfiguration] ): @@ -2957,7 +2900,6 @@ async def _create_or_update_data_source_connection( name: str, data_source: _models2.SearchIndexerDataSourceConnection, *, - skip_indexer_reset_requirement_for_cache: Optional[bool] = None, content_type: str = "application/json", etag: Optional[str] = None, match_condition: Optional[MatchConditions] = None, @@ -2969,7 +2911,6 @@ async def _create_or_update_data_source_connection( name: str, data_source: JSON, *, - skip_indexer_reset_requirement_for_cache: Optional[bool] = None, content_type: str = "application/json", etag: Optional[str] = None, match_condition: Optional[MatchConditions] = None, @@ -2981,7 +2922,6 @@ async def _create_or_update_data_source_connection( name: str, data_source: IO[bytes], *, - skip_indexer_reset_requirement_for_cache: Optional[bool] = None, content_type: str = "application/json", etag: Optional[str] = None, match_condition: Optional[MatchConditions] = None, @@ -2994,7 +2934,6 @@ async def _create_or_update_data_source_connection( name: str, 
data_source: Union[_models2.SearchIndexerDataSourceConnection, JSON, IO[bytes]], *, - skip_indexer_reset_requirement_for_cache: Optional[bool] = None, etag: Optional[str] = None, match_condition: Optional[MatchConditions] = None, **kwargs: Any @@ -3007,9 +2946,6 @@ async def _create_or_update_data_source_connection( following types: SearchIndexerDataSourceConnection, JSON, IO[bytes] Required. :type data_source: ~azure.search.documents.indexes.models.SearchIndexerDataSourceConnection or JSON or IO[bytes] - :keyword skip_indexer_reset_requirement_for_cache: Ignores cache reset requirements. Default - value is None. - :paramtype skip_indexer_reset_requirement_for_cache: bool :keyword etag: check if resource is changed. Set None to skip checking etag. Default value is None. :paramtype etag: str @@ -3050,7 +2986,6 @@ async def _create_or_update_data_source_connection( _request = build_search_indexer_create_or_update_data_source_connection_request( name=name, - skip_indexer_reset_requirement_for_cache=skip_indexer_reset_requirement_for_cache, etag=etag, match_condition=match_condition, prefer=prefer, @@ -3065,6 +3000,7 @@ async def _create_or_update_data_source_connection( } _request.url = self._client.format_url(_request.url, **path_format_arguments) + _decompress = kwargs.pop("decompress", True) _stream = kwargs.pop("stream", False) pipeline_response: PipelineResponse = await self._client._pipeline.run( # type: ignore # pylint: disable=protected-access _request, stream=_stream, **kwargs @@ -3086,7 +3022,7 @@ async def _create_or_update_data_source_connection( raise HttpResponseError(response=response, model=error) if _stream: - deserialized = response.iter_bytes() + deserialized = response.iter_bytes() if _decompress else response.iter_raw() else: deserialized = _deserialize(_models2.SearchIndexerDataSourceConnection, response.json()) @@ -3197,6 +3133,7 @@ async def get_data_source_connection(self, name: str, **kwargs: Any) -> _models2 } _request.url = 
self._client.format_url(_request.url, **path_format_arguments) + _decompress = kwargs.pop("decompress", True) _stream = kwargs.pop("stream", False) pipeline_response: PipelineResponse = await self._client._pipeline.run( # type: ignore # pylint: disable=protected-access _request, stream=_stream, **kwargs @@ -3218,7 +3155,7 @@ async def get_data_source_connection(self, name: str, **kwargs: Any) -> _models2 raise HttpResponseError(response=response, model=error) if _stream: - deserialized = response.iter_bytes() + deserialized = response.iter_bytes() if _decompress else response.iter_raw() else: deserialized = _deserialize(_models2.SearchIndexerDataSourceConnection, response.json()) @@ -3265,6 +3202,7 @@ async def _get_data_source_connections( } _request.url = self._client.format_url(_request.url, **path_format_arguments) + _decompress = kwargs.pop("decompress", True) _stream = kwargs.pop("stream", False) pipeline_response: PipelineResponse = await self._client._pipeline.run( # type: ignore # pylint: disable=protected-access _request, stream=_stream, **kwargs @@ -3286,7 +3224,7 @@ async def _get_data_source_connections( raise HttpResponseError(response=response, model=error) if _stream: - deserialized = response.iter_bytes() + deserialized = response.iter_bytes() if _decompress else response.iter_raw() else: deserialized = _deserialize( _models2._models.ListDataSourcesResult, response.json() # pylint: disable=protected-access @@ -3401,6 +3339,7 @@ async def create_data_source_connection( } _request.url = self._client.format_url(_request.url, **path_format_arguments) + _decompress = kwargs.pop("decompress", True) _stream = kwargs.pop("stream", False) pipeline_response: PipelineResponse = await self._client._pipeline.run( # type: ignore # pylint: disable=protected-access _request, stream=_stream, **kwargs @@ -3422,7 +3361,7 @@ async def create_data_source_connection( raise HttpResponseError(response=response, model=error) if _stream: - deserialized = 
response.iter_bytes() + deserialized = response.iter_bytes() if _decompress else response.iter_raw() else: deserialized = _deserialize(_models2.SearchIndexerDataSourceConnection, response.json()) @@ -3483,206 +3422,6 @@ async def reset_indexer(self, name: str, **kwargs: Any) -> None: if cls: return cls(pipeline_response, None, {}) # type: ignore - @overload - async def _resync( - self, - name: str, - indexer_resync: _models2.IndexerResyncBody, - *, - content_type: str = "application/json", - **kwargs: Any - ) -> None: ... - @overload - async def _resync( - self, name: str, indexer_resync: JSON, *, content_type: str = "application/json", **kwargs: Any - ) -> None: ... - @overload - async def _resync( - self, name: str, indexer_resync: IO[bytes], *, content_type: str = "application/json", **kwargs: Any - ) -> None: ... - - @distributed_trace_async - async def _resync( - self, name: str, indexer_resync: Union[_models2.IndexerResyncBody, JSON, IO[bytes]], **kwargs: Any - ) -> None: - """Resync selective options from the datasource to be re-ingested by the indexer.". - - :param name: The name of the indexer. Required. - :type name: str - :param indexer_resync: The definition of the indexer resync options. Is one of the following - types: IndexerResyncBody, JSON, IO[bytes] Required. 
- :type indexer_resync: ~azure.search.documents.indexes.models.IndexerResyncBody or JSON or - IO[bytes] - :return: None - :rtype: None - :raises ~azure.core.exceptions.HttpResponseError: - """ - error_map: MutableMapping = { - 401: ClientAuthenticationError, - 404: ResourceNotFoundError, - 409: ResourceExistsError, - 304: ResourceNotModifiedError, - } - error_map.update(kwargs.pop("error_map", {}) or {}) - - _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) - _params = kwargs.pop("params", {}) or {} - - content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None)) - cls: ClsType[None] = kwargs.pop("cls", None) - - content_type = content_type or "application/json" - _content = None - if isinstance(indexer_resync, (IOBase, bytes)): - _content = indexer_resync - else: - _content = json.dumps(indexer_resync, cls=SdkJSONEncoder, exclude_readonly=True) # type: ignore - - _request = build_search_indexer_resync_request( - name=name, - content_type=content_type, - api_version=self._config.api_version, - content=_content, - headers=_headers, - params=_params, - ) - path_format_arguments = { - "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str", skip_quote=True), - } - _request.url = self._client.format_url(_request.url, **path_format_arguments) - - _stream = False - pipeline_response: PipelineResponse = await self._client._pipeline.run( # type: ignore # pylint: disable=protected-access - _request, stream=_stream, **kwargs - ) - - response = pipeline_response.http_response - - if response.status_code not in [204]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - error = _failsafe_deserialize( - _models3.ErrorResponse, - response, - ) - raise HttpResponseError(response=response, model=error) - - if cls: - return cls(pipeline_response, None, {}) # type: ignore - - @overload - async def _reset_documents( - self, - name: str, - keys_or_ids: 
Optional[_models2.DocumentKeysOrIds] = None, - *, - overwrite: Optional[bool] = None, - content_type: str = "application/json", - **kwargs: Any - ) -> None: ... - @overload - async def _reset_documents( - self, - name: str, - keys_or_ids: Optional[JSON] = None, - *, - overwrite: Optional[bool] = None, - content_type: str = "application/json", - **kwargs: Any - ) -> None: ... - @overload - async def _reset_documents( - self, - name: str, - keys_or_ids: Optional[IO[bytes]] = None, - *, - overwrite: Optional[bool] = None, - content_type: str = "application/json", - **kwargs: Any - ) -> None: ... - - @distributed_trace_async - async def _reset_documents( - self, - name: str, - keys_or_ids: Optional[Union[_models2.DocumentKeysOrIds, JSON, IO[bytes]]] = None, - *, - overwrite: Optional[bool] = None, - **kwargs: Any - ) -> None: - """Resets specific documents in the datasource to be selectively re-ingested by the indexer. - - :param name: The name of the indexer. Required. - :type name: str - :param keys_or_ids: The keys or ids of the documents to be re-ingested. If keys are provided, - the document key field must be specified in the indexer configuration. If ids are provided, the - document key field is ignored. Is one of the following types: DocumentKeysOrIds, JSON, - IO[bytes] Default value is None. - :type keys_or_ids: ~azure.search.documents.indexes.models.DocumentKeysOrIds or JSON or - IO[bytes] - :keyword overwrite: If false, keys or ids will be appended to existing ones. If true, only the - keys or ids in this payload will be queued to be re-ingested. Default value is None. 
- :paramtype overwrite: bool - :return: None - :rtype: None - :raises ~azure.core.exceptions.HttpResponseError: - """ - error_map: MutableMapping = { - 401: ClientAuthenticationError, - 404: ResourceNotFoundError, - 409: ResourceExistsError, - 304: ResourceNotModifiedError, - } - error_map.update(kwargs.pop("error_map", {}) or {}) - - _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) - _params = kwargs.pop("params", {}) or {} - - content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None)) - content_type = content_type if keys_or_ids else None - cls: ClsType[None] = kwargs.pop("cls", None) - - content_type = content_type or "application/json" if keys_or_ids else None - _content = None - if isinstance(keys_or_ids, (IOBase, bytes)): - _content = keys_or_ids - else: - if keys_or_ids is not None: - _content = json.dumps(keys_or_ids, cls=SdkJSONEncoder, exclude_readonly=True) # type: ignore - else: - _content = None - - _request = build_search_indexer_reset_documents_request( - name=name, - overwrite=overwrite, - content_type=content_type, - api_version=self._config.api_version, - content=_content, - headers=_headers, - params=_params, - ) - path_format_arguments = { - "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str", skip_quote=True), - } - _request.url = self._client.format_url(_request.url, **path_format_arguments) - - _stream = False - pipeline_response: PipelineResponse = await self._client._pipeline.run( # type: ignore # pylint: disable=protected-access - _request, stream=_stream, **kwargs - ) - - response = pipeline_response.http_response - - if response.status_code not in [204]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - error = _failsafe_deserialize( - _models3.ErrorResponse, - response, - ) - raise HttpResponseError(response=response, model=error) - - if cls: - return cls(pipeline_response, None, {}) # type: ignore - 
@distributed_trace_async async def run_indexer(self, name: str, **kwargs: Any) -> None: """Runs an indexer on-demand. @@ -3741,8 +3480,6 @@ async def _create_or_update_indexer( name: str, indexer: _models2.SearchIndexer, *, - skip_indexer_reset_requirement_for_cache: Optional[bool] = None, - disable_cache_reprocessing_change_detection: Optional[bool] = None, content_type: str = "application/json", etag: Optional[str] = None, match_condition: Optional[MatchConditions] = None, @@ -3754,8 +3491,6 @@ async def _create_or_update_indexer( name: str, indexer: JSON, *, - skip_indexer_reset_requirement_for_cache: Optional[bool] = None, - disable_cache_reprocessing_change_detection: Optional[bool] = None, content_type: str = "application/json", etag: Optional[str] = None, match_condition: Optional[MatchConditions] = None, @@ -3767,8 +3502,6 @@ async def _create_or_update_indexer( name: str, indexer: IO[bytes], *, - skip_indexer_reset_requirement_for_cache: Optional[bool] = None, - disable_cache_reprocessing_change_detection: Optional[bool] = None, content_type: str = "application/json", etag: Optional[str] = None, match_condition: Optional[MatchConditions] = None, @@ -3781,8 +3514,6 @@ async def _create_or_update_indexer( name: str, indexer: Union[_models2.SearchIndexer, JSON, IO[bytes]], *, - skip_indexer_reset_requirement_for_cache: Optional[bool] = None, - disable_cache_reprocessing_change_detection: Optional[bool] = None, etag: Optional[str] = None, match_condition: Optional[MatchConditions] = None, **kwargs: Any @@ -3794,12 +3525,6 @@ async def _create_or_update_indexer( :param indexer: The definition of the indexer to create or update. Is one of the following types: SearchIndexer, JSON, IO[bytes] Required. :type indexer: ~azure.search.documents.indexes.models.SearchIndexer or JSON or IO[bytes] - :keyword skip_indexer_reset_requirement_for_cache: Ignores cache reset requirements. Default - value is None. 
- :paramtype skip_indexer_reset_requirement_for_cache: bool - :keyword disable_cache_reprocessing_change_detection: Disables cache reprocessing change - detection. Default value is None. - :paramtype disable_cache_reprocessing_change_detection: bool :keyword etag: check if resource is changed. Set None to skip checking etag. Default value is None. :paramtype etag: str @@ -3839,8 +3564,6 @@ async def _create_or_update_indexer( _request = build_search_indexer_create_or_update_indexer_request( name=name, - skip_indexer_reset_requirement_for_cache=skip_indexer_reset_requirement_for_cache, - disable_cache_reprocessing_change_detection=disable_cache_reprocessing_change_detection, etag=etag, match_condition=match_condition, prefer=prefer, @@ -3855,6 +3578,7 @@ async def _create_or_update_indexer( } _request.url = self._client.format_url(_request.url, **path_format_arguments) + _decompress = kwargs.pop("decompress", True) _stream = kwargs.pop("stream", False) pipeline_response: PipelineResponse = await self._client._pipeline.run( # type: ignore # pylint: disable=protected-access _request, stream=_stream, **kwargs @@ -3876,7 +3600,7 @@ async def _create_or_update_indexer( raise HttpResponseError(response=response, model=error) if _stream: - deserialized = response.iter_bytes() + deserialized = response.iter_bytes() if _decompress else response.iter_raw() else: deserialized = _deserialize(_models2.SearchIndexer, response.json()) @@ -3986,6 +3710,7 @@ async def get_indexer(self, name: str, **kwargs: Any) -> _models2.SearchIndexer: } _request.url = self._client.format_url(_request.url, **path_format_arguments) + _decompress = kwargs.pop("decompress", True) _stream = kwargs.pop("stream", False) pipeline_response: PipelineResponse = await self._client._pipeline.run( # type: ignore # pylint: disable=protected-access _request, stream=_stream, **kwargs @@ -4007,7 +3732,7 @@ async def get_indexer(self, name: str, **kwargs: Any) -> _models2.SearchIndexer: raise 
HttpResponseError(response=response, model=error) if _stream: - deserialized = response.iter_bytes() + deserialized = response.iter_bytes() if _decompress else response.iter_raw() else: deserialized = _deserialize(_models2.SearchIndexer, response.json()) @@ -4054,6 +3779,7 @@ async def _get_indexers( } _request.url = self._client.format_url(_request.url, **path_format_arguments) + _decompress = kwargs.pop("decompress", True) _stream = kwargs.pop("stream", False) pipeline_response: PipelineResponse = await self._client._pipeline.run( # type: ignore # pylint: disable=protected-access _request, stream=_stream, **kwargs @@ -4075,7 +3801,7 @@ async def _get_indexers( raise HttpResponseError(response=response, model=error) if _stream: - deserialized = response.iter_bytes() + deserialized = response.iter_bytes() if _decompress else response.iter_raw() else: deserialized = _deserialize( _models2._models.ListIndexersResult, response.json() # pylint: disable=protected-access @@ -4180,6 +3906,7 @@ async def create_indexer( } _request.url = self._client.format_url(_request.url, **path_format_arguments) + _decompress = kwargs.pop("decompress", True) _stream = kwargs.pop("stream", False) pipeline_response: PipelineResponse = await self._client._pipeline.run( # type: ignore # pylint: disable=protected-access _request, stream=_stream, **kwargs @@ -4201,7 +3928,7 @@ async def create_indexer( raise HttpResponseError(response=response, model=error) if _stream: - deserialized = response.iter_bytes() + deserialized = response.iter_bytes() if _decompress else response.iter_raw() else: deserialized = _deserialize(_models2.SearchIndexer, response.json()) @@ -4244,6 +3971,7 @@ async def get_indexer_status(self, name: str, **kwargs: Any) -> _models2.SearchI } _request.url = self._client.format_url(_request.url, **path_format_arguments) + _decompress = kwargs.pop("decompress", True) _stream = kwargs.pop("stream", False) pipeline_response: PipelineResponse = await self._client._pipeline.run( 
# type: ignore # pylint: disable=protected-access _request, stream=_stream, **kwargs @@ -4265,7 +3993,7 @@ async def get_indexer_status(self, name: str, **kwargs: Any) -> _models2.SearchI raise HttpResponseError(response=response, model=error) if _stream: - deserialized = response.iter_bytes() + deserialized = response.iter_bytes() if _decompress else response.iter_raw() else: deserialized = _deserialize(_models2.SearchIndexerStatus, response.json()) @@ -4280,8 +4008,6 @@ async def _create_or_update_skillset( name: str, skillset: _models2.SearchIndexerSkillset, *, - skip_indexer_reset_requirement_for_cache: Optional[bool] = None, - disable_cache_reprocessing_change_detection: Optional[bool] = None, content_type: str = "application/json", etag: Optional[str] = None, match_condition: Optional[MatchConditions] = None, @@ -4293,8 +4019,6 @@ async def _create_or_update_skillset( name: str, skillset: JSON, *, - skip_indexer_reset_requirement_for_cache: Optional[bool] = None, - disable_cache_reprocessing_change_detection: Optional[bool] = None, content_type: str = "application/json", etag: Optional[str] = None, match_condition: Optional[MatchConditions] = None, @@ -4306,8 +4030,6 @@ async def _create_or_update_skillset( name: str, skillset: IO[bytes], *, - skip_indexer_reset_requirement_for_cache: Optional[bool] = None, - disable_cache_reprocessing_change_detection: Optional[bool] = None, content_type: str = "application/json", etag: Optional[str] = None, match_condition: Optional[MatchConditions] = None, @@ -4320,8 +4042,6 @@ async def _create_or_update_skillset( name: str, skillset: Union[_models2.SearchIndexerSkillset, JSON, IO[bytes]], *, - skip_indexer_reset_requirement_for_cache: Optional[bool] = None, - disable_cache_reprocessing_change_detection: Optional[bool] = None, etag: Optional[str] = None, match_condition: Optional[MatchConditions] = None, **kwargs: Any @@ -4334,12 +4054,6 @@ async def _create_or_update_skillset( service. 
Is one of the following types: SearchIndexerSkillset, JSON, IO[bytes] Required. :type skillset: ~azure.search.documents.indexes.models.SearchIndexerSkillset or JSON or IO[bytes] - :keyword skip_indexer_reset_requirement_for_cache: Ignores cache reset requirements. Default - value is None. - :paramtype skip_indexer_reset_requirement_for_cache: bool - :keyword disable_cache_reprocessing_change_detection: Disables cache reprocessing change - detection. Default value is None. - :paramtype disable_cache_reprocessing_change_detection: bool :keyword etag: check if resource is changed. Set None to skip checking etag. Default value is None. :paramtype etag: str @@ -4379,8 +4093,6 @@ async def _create_or_update_skillset( _request = build_search_indexer_create_or_update_skillset_request( name=name, - skip_indexer_reset_requirement_for_cache=skip_indexer_reset_requirement_for_cache, - disable_cache_reprocessing_change_detection=disable_cache_reprocessing_change_detection, etag=etag, match_condition=match_condition, prefer=prefer, @@ -4395,6 +4107,7 @@ async def _create_or_update_skillset( } _request.url = self._client.format_url(_request.url, **path_format_arguments) + _decompress = kwargs.pop("decompress", True) _stream = kwargs.pop("stream", False) pipeline_response: PipelineResponse = await self._client._pipeline.run( # type: ignore # pylint: disable=protected-access _request, stream=_stream, **kwargs @@ -4416,7 +4129,7 @@ async def _create_or_update_skillset( raise HttpResponseError(response=response, model=error) if _stream: - deserialized = response.iter_bytes() + deserialized = response.iter_bytes() if _decompress else response.iter_raw() else: deserialized = _deserialize(_models2.SearchIndexerSkillset, response.json()) @@ -4526,6 +4239,7 @@ async def get_skillset(self, name: str, **kwargs: Any) -> _models2.SearchIndexer } _request.url = self._client.format_url(_request.url, **path_format_arguments) + _decompress = kwargs.pop("decompress", True) _stream = 
kwargs.pop("stream", False) pipeline_response: PipelineResponse = await self._client._pipeline.run( # type: ignore # pylint: disable=protected-access _request, stream=_stream, **kwargs @@ -4547,7 +4261,7 @@ async def get_skillset(self, name: str, **kwargs: Any) -> _models2.SearchIndexer raise HttpResponseError(response=response, model=error) if _stream: - deserialized = response.iter_bytes() + deserialized = response.iter_bytes() if _decompress else response.iter_raw() else: deserialized = _deserialize(_models2.SearchIndexerSkillset, response.json()) @@ -4594,6 +4308,7 @@ async def _get_skillsets( } _request.url = self._client.format_url(_request.url, **path_format_arguments) + _decompress = kwargs.pop("decompress", True) _stream = kwargs.pop("stream", False) pipeline_response: PipelineResponse = await self._client._pipeline.run( # type: ignore # pylint: disable=protected-access _request, stream=_stream, **kwargs @@ -4615,7 +4330,7 @@ async def _get_skillsets( raise HttpResponseError(response=response, model=error) if _stream: - deserialized = response.iter_bytes() + deserialized = response.iter_bytes() if _decompress else response.iter_raw() else: deserialized = _deserialize( _models2._models.ListSkillsetsResult, response.json() # pylint: disable=protected-access @@ -4724,6 +4439,7 @@ async def create_skillset( } _request.url = self._client.format_url(_request.url, **path_format_arguments) + _decompress = kwargs.pop("decompress", True) _stream = kwargs.pop("stream", False) pipeline_response: PipelineResponse = await self._client._pipeline.run( # type: ignore # pylint: disable=protected-access _request, stream=_stream, **kwargs @@ -4745,7 +4461,7 @@ async def create_skillset( raise HttpResponseError(response=response, model=error) if _stream: - deserialized = response.iter_bytes() + deserialized = response.iter_bytes() if _decompress else response.iter_raw() else: deserialized = _deserialize(_models2.SearchIndexerSkillset, response.json()) @@ -4753,83 +4469,3 @@ 
async def create_skillset( return cls(pipeline_response, deserialized, {}) # type: ignore return deserialized # type: ignore - - @overload - async def _reset_skills( - self, name: str, skill_names: _models2.SkillNames, *, content_type: str = "application/json", **kwargs: Any - ) -> None: ... - @overload - async def _reset_skills( - self, name: str, skill_names: JSON, *, content_type: str = "application/json", **kwargs: Any - ) -> None: ... - @overload - async def _reset_skills( - self, name: str, skill_names: IO[bytes], *, content_type: str = "application/json", **kwargs: Any - ) -> None: ... - - @distributed_trace_async - async def _reset_skills( - self, name: str, skill_names: Union[_models2.SkillNames, JSON, IO[bytes]], **kwargs: Any - ) -> None: - """Reset an existing skillset in a search service. - - :param name: The name of the skillset. Required. - :type name: str - :param skill_names: The names of the skills to reset. If not specified, all skills in the - skillset will be reset. Is one of the following types: SkillNames, JSON, IO[bytes] Required. 
- :type skill_names: ~azure.search.documents.indexes.models.SkillNames or JSON or IO[bytes] - :return: None - :rtype: None - :raises ~azure.core.exceptions.HttpResponseError: - """ - error_map: MutableMapping = { - 401: ClientAuthenticationError, - 404: ResourceNotFoundError, - 409: ResourceExistsError, - 304: ResourceNotModifiedError, - } - error_map.update(kwargs.pop("error_map", {}) or {}) - - _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) - _params = kwargs.pop("params", {}) or {} - - content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None)) - cls: ClsType[None] = kwargs.pop("cls", None) - - content_type = content_type or "application/json" - _content = None - if isinstance(skill_names, (IOBase, bytes)): - _content = skill_names - else: - _content = json.dumps(skill_names, cls=SdkJSONEncoder, exclude_readonly=True) # type: ignore - - _request = build_search_indexer_reset_skills_request( - name=name, - content_type=content_type, - api_version=self._config.api_version, - content=_content, - headers=_headers, - params=_params, - ) - path_format_arguments = { - "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str", skip_quote=True), - } - _request.url = self._client.format_url(_request.url, **path_format_arguments) - - _stream = False - pipeline_response: PipelineResponse = await self._client._pipeline.run( # type: ignore # pylint: disable=protected-access - _request, stream=_stream, **kwargs - ) - - response = pipeline_response.http_response - - if response.status_code not in [204]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - error = _failsafe_deserialize( - _models3.ErrorResponse, - response, - ) - raise HttpResponseError(response=response, model=error) - - if cls: - return cls(pipeline_response, None, {}) # type: ignore diff --git a/sdk/search/azure-search-documents/azure/search/documents/indexes/aio/_operations/_patch.py 
b/sdk/search/azure-search-documents/azure/search/documents/indexes/aio/_operations/_patch.py index 3a0b6b321e96..5acb7828de29 100644 --- a/sdk/search/azure-search-documents/azure/search/documents/indexes/aio/_operations/_patch.py +++ b/sdk/search/azure-search-documents/azure/search/documents/indexes/aio/_operations/_patch.py @@ -513,7 +513,6 @@ async def create_or_update_data_source_connection( self, data_source_connection: _models.SearchIndexerDataSourceConnection, *, - skip_indexer_reset_requirement_for_cache: Optional[bool] = None, match_condition: MatchConditions = MatchConditions.Unconditionally, **kwargs: Any, ) -> _models.SearchIndexerDataSourceConnection: @@ -521,9 +520,6 @@ async def create_or_update_data_source_connection( :param data_source_connection: The SearchIndexerDataSourceConnection object to create or update. Required. :type data_source_connection: ~azure.search.documents.indexes.models.SearchIndexerDataSourceConnection - :keyword skip_indexer_reset_requirement_for_cache: Ignores cache reset requirements. Default - value is None. - :paramtype skip_indexer_reset_requirement_for_cache: bool :keyword match_condition: The match condition to use upon the etag. Default value is None. 
:paramtype match_condition: ~azure.core.MatchConditions :return: SearchIndexerDataSourceConnection @@ -536,7 +532,6 @@ async def create_or_update_data_source_connection( prefer="return=representation", match_condition=match_condition, etag=data_source_connection.e_tag, - skip_indexer_reset_requirement_for_cache=skip_indexer_reset_requirement_for_cache, **kwargs, ) @@ -578,8 +573,6 @@ async def create_or_update_indexer( self, indexer: _models.SearchIndexer, *, - skip_indexer_reset_requirement_for_cache: Optional[bool] = None, - disable_cache_reprocessing_change_detection: Optional[bool] = None, match_condition: MatchConditions = MatchConditions.Unconditionally, **kwargs: Any, ) -> _models.SearchIndexer: @@ -587,12 +580,6 @@ async def create_or_update_indexer( :param indexer: The SearchIndexer object to create or update. Required. :type indexer: ~azure.search.documents.indexes.models.SearchIndexer - :keyword skip_indexer_reset_requirement_for_cache: Ignores cache reset requirements. Default - value is None. - :paramtype skip_indexer_reset_requirement_for_cache: bool - :keyword disable_cache_reprocessing_change_detection: Disables cache reprocessing change - detection. Default value is None. - :paramtype disable_cache_reprocessing_change_detection: bool :keyword match_condition: The match condition to use upon the etag. Default value is None. 
:paramtype match_condition: ~azure.core.MatchConditions :return: SearchIndexer @@ -605,8 +592,6 @@ async def create_or_update_indexer( prefer="return=representation", match_condition=match_condition, etag=indexer.e_tag, - skip_indexer_reset_requirement_for_cache=skip_indexer_reset_requirement_for_cache, - disable_cache_reprocessing_change_detection=disable_cache_reprocessing_change_detection, **kwargs, ) @@ -648,8 +633,6 @@ async def create_or_update_skillset( self, skillset: _models.SearchIndexerSkillset, *, - skip_indexer_reset_requirement_for_cache: Optional[bool] = None, - disable_cache_reprocessing_change_detection: Optional[bool] = None, match_condition: MatchConditions = MatchConditions.Unconditionally, **kwargs: Any, ) -> _models.SearchIndexerSkillset: @@ -657,12 +640,6 @@ async def create_or_update_skillset( :param skillset: The SearchIndexerSkillset object to create or update. Required. :type skillset: ~azure.search.documents.indexes.models.SearchIndexerSkillset - :keyword skip_indexer_reset_requirement_for_cache: Ignores cache reset requirements. Default - value is None. - :paramtype skip_indexer_reset_requirement_for_cache: bool - :keyword disable_cache_reprocessing_change_detection: Disables cache reprocessing change - detection. Default value is None. - :paramtype disable_cache_reprocessing_change_detection: bool :keyword match_condition: The match condition to use upon the etag. Default value is None. 
:paramtype match_condition: ~azure.core.MatchConditions :return: SearchIndexerSkillset @@ -675,8 +652,6 @@ async def create_or_update_skillset( prefer="return=representation", match_condition=match_condition, etag=skillset.e_tag, - skip_indexer_reset_requirement_for_cache=skip_indexer_reset_requirement_for_cache, - disable_cache_reprocessing_change_detection=disable_cache_reprocessing_change_detection, **kwargs, ) @@ -771,94 +746,6 @@ async def get_skillset_names(self, **kwargs) -> Sequence[str]: result = await self.get_skillsets(**kwargs) return [x.name for x in result] - @distributed_trace_async - async def reset_documents( - self, - indexer: Union[str, _models.SearchIndexer], - keys_or_ids: _models.DocumentKeysOrIds, - *, - overwrite: bool = False, - **kwargs: Any, - ) -> None: - """Resets specific documents in the datasource to be selectively re-ingested by the indexer. - - :param indexer: The indexer to reset documents for. Can be the indexer name or a SearchIndexer object. - :type indexer: str or ~azure.search.documents.indexes.models.SearchIndexer - :param keys_or_ids: The document keys or ids to reset. - :type keys_or_ids: ~azure.search.documents.indexes.models.DocumentKeysOrIds - :keyword overwrite: If false, keys or ids will be appended to existing ones. If true, only the - keys or ids in this payload will be queued to be re-ingested. Default value is False. - :paramtype overwrite: bool - :return: None - :rtype: None - :raises ~azure.core.exceptions.HttpResponseError: - """ - try: - name: str = indexer.name # type: ignore - except AttributeError: - name = indexer # type: ignore - return await self._reset_documents( - name=name, - keys_or_ids=keys_or_ids, - overwrite=overwrite, - **kwargs, - ) - - @distributed_trace_async - async def reset_skills( - self, - skillset: Union[str, _models.SearchIndexerSkillset], - skill_names: List[str], - **kwargs: Any, - ) -> None: - """Reset an existing skillset in a search service. 
- - :param skillset: The skillset to reset skills for. Can be the skillset name or a SearchIndexerSkillset object. - :type skillset: str or ~azure.search.documents.indexes.models.SearchIndexerSkillset - :param skill_names: The names of the skills to reset. - :type skill_names: list[str] - :return: None - :rtype: None - :raises ~azure.core.exceptions.HttpResponseError: - """ - try: - name: str = skillset.name # type: ignore - except AttributeError: - name = skillset # type: ignore - return await self._reset_skills( - name=name, - skill_names=_models.SkillNames(skill_names=skill_names), - **kwargs, - ) - - @distributed_trace_async - async def resync( - self, - indexer: Union[str, _models.SearchIndexer], - indexer_resync_options: List[Union[str, _models.IndexerResyncOption]], - **kwargs: Any, - ) -> None: - """Resync selective options from the datasource to be re-ingested by the indexer. - - :param indexer: The indexer to resync. Can be the indexer name or a SearchIndexer object. - :type indexer: str or ~azure.search.documents.indexes.models.SearchIndexer - :param indexer_resync_options: Re-sync options that have been pre-defined from data source. 
- :type indexer_resync_options: list[str or ~azure.search.documents.indexes.models.IndexerResyncOption] - :return: None - :rtype: None - :raises ~azure.core.exceptions.HttpResponseError: - """ - try: - name: str = indexer.name # type: ignore - except AttributeError: - name = indexer # type: ignore - indexer_resync = _models.IndexerResyncBody(options=indexer_resync_options) - return await self._resync( - name=name, - indexer_resync=indexer_resync, - **kwargs, - ) - __all__: list[str] = [ "_SearchIndexClientOperationsMixin", diff --git a/sdk/search/azure-search-documents/azure/search/documents/indexes/models/__init__.py b/sdk/search/azure-search-documents/azure/search/documents/indexes/models/__init__.py index bebb2d41130c..9f99b9a987cc 100644 --- a/sdk/search/azure-search-documents/azure/search/documents/indexes/models/__init__.py +++ b/sdk/search/azure-search-documents/azure/search/documents/indexes/models/__init__.py @@ -16,8 +16,6 @@ from ._models import ( # type: ignore AIServicesAccountIdentity, AIServicesAccountKey, - AIServicesVisionParameters, - AIServicesVisionVectorizer, AnalyzeResult, AnalyzeTextOptions, AnalyzedTokenInfo, @@ -26,10 +24,8 @@ AzureBlobKnowledgeSource, AzureBlobKnowledgeSourceParameters, AzureMachineLearningParameters, - AzureMachineLearningSkill, AzureMachineLearningVectorizer, AzureOpenAIEmbeddingSkill, - AzureOpenAITokenizerParameters, AzureOpenAIVectorizer, AzureOpenAIVectorizerParameters, BM25SimilarityAlgorithm, @@ -84,15 +80,10 @@ HnswAlgorithmConfiguration, HnswParameters, ImageAnalysisSkill, - IndexStatisticsSummary, IndexedOneLakeKnowledgeSource, IndexedOneLakeKnowledgeSourceParameters, - IndexedSharePointKnowledgeSource, - IndexedSharePointKnowledgeSourceParameters, - IndexerCurrentState, IndexerExecutionResult, IndexerResyncBody, - IndexerRuntime, IndexingParameters, IndexingParametersConfiguration, IndexingSchedule, @@ -136,8 +127,6 @@ PatternReplaceTokenFilter, PatternTokenizer, PhoneticTokenFilter, - 
RemoteSharePointKnowledgeSource, - RemoteSharePointKnowledgeSourceParameters, RescoringOptions, ResourceCounter, ScalarQuantizationCompression, @@ -151,7 +140,6 @@ SearchIndexKnowledgeSource, SearchIndexKnowledgeSourceParameters, SearchIndexer, - SearchIndexerCache, SearchIndexerDataContainer, SearchIndexerDataIdentity, SearchIndexerDataNoneIdentity, @@ -165,7 +153,6 @@ SearchIndexerKnowledgeStoreBlobProjectionSelector, SearchIndexerKnowledgeStoreFileProjectionSelector, SearchIndexerKnowledgeStoreObjectProjectionSelector, - SearchIndexerKnowledgeStoreParameters, SearchIndexerKnowledgeStoreProjection, SearchIndexerKnowledgeStoreProjectionSelector, SearchIndexerKnowledgeStoreTableProjectionSelector, @@ -184,7 +171,6 @@ SemanticPrioritizedFields, SemanticSearch, SentimentSkillV3, - ServiceIndexersRuntime, ShaperSkill, ShingleTokenFilter, SimilarityAlgorithm, @@ -212,7 +198,6 @@ VectorSearchCompression, VectorSearchProfile, VectorSearchVectorizer, - VisionVectorizeSkill, WebApiHttpHeaders, WebApiSkill, WebApiVectorizer, @@ -244,17 +229,15 @@ DocumentIntelligenceLayoutSkillOutputFormat, DocumentIntelligenceLayoutSkillOutputMode, EdgeNGramTokenFilterSide, + EntityCategory, + EntityRecognitionSkillLanguage, ImageAnalysisSkillLanguage, ImageDetail, IndexProjectionMode, - IndexedSharePointContainerName, IndexerExecutionEnvironment, IndexerExecutionStatus, - IndexerExecutionStatusDetail, - IndexerPermissionOption, IndexerResyncOption, IndexerStatus, - IndexingMode, KeyPhraseExtractionSkillLanguage, KnowledgeBaseModelKind, KnowledgeSourceContentExtractionMode, @@ -271,19 +254,16 @@ OcrLineEnding, OcrSkillLanguage, PIIDetectionSkillMaskingMode, - PermissionFilter, PhoneticEncoder, RankingOrder, RegexFlags, ScoringFunctionAggregation, ScoringFunctionInterpolation, SearchFieldDataType, - SearchIndexPermissionFilterOption, SearchIndexerDataSourceType, + SentimentSkillLanguage, SnowballTokenFilterLanguage, - SplitSkillEncoderModelName, SplitSkillLanguage, - SplitSkillUnit, 
StemmerTokenFilterLanguage, StopwordsList, TextSplitMode, @@ -306,8 +286,6 @@ __all__ = [ "AIServicesAccountIdentity", "AIServicesAccountKey", - "AIServicesVisionParameters", - "AIServicesVisionVectorizer", "AnalyzeResult", "AnalyzeTextOptions", "AnalyzedTokenInfo", @@ -316,10 +294,8 @@ "AzureBlobKnowledgeSource", "AzureBlobKnowledgeSourceParameters", "AzureMachineLearningParameters", - "AzureMachineLearningSkill", "AzureMachineLearningVectorizer", "AzureOpenAIEmbeddingSkill", - "AzureOpenAITokenizerParameters", "AzureOpenAIVectorizer", "AzureOpenAIVectorizerParameters", "BM25SimilarityAlgorithm", @@ -374,15 +350,10 @@ "HnswAlgorithmConfiguration", "HnswParameters", "ImageAnalysisSkill", - "IndexStatisticsSummary", "IndexedOneLakeKnowledgeSource", "IndexedOneLakeKnowledgeSourceParameters", - "IndexedSharePointKnowledgeSource", - "IndexedSharePointKnowledgeSourceParameters", - "IndexerCurrentState", "IndexerExecutionResult", "IndexerResyncBody", - "IndexerRuntime", "IndexingParameters", "IndexingParametersConfiguration", "IndexingSchedule", @@ -426,8 +397,6 @@ "PatternReplaceTokenFilter", "PatternTokenizer", "PhoneticTokenFilter", - "RemoteSharePointKnowledgeSource", - "RemoteSharePointKnowledgeSourceParameters", "RescoringOptions", "ResourceCounter", "ScalarQuantizationCompression", @@ -441,7 +410,6 @@ "SearchIndexKnowledgeSource", "SearchIndexKnowledgeSourceParameters", "SearchIndexer", - "SearchIndexerCache", "SearchIndexerDataContainer", "SearchIndexerDataIdentity", "SearchIndexerDataNoneIdentity", @@ -455,7 +423,6 @@ "SearchIndexerKnowledgeStoreBlobProjectionSelector", "SearchIndexerKnowledgeStoreFileProjectionSelector", "SearchIndexerKnowledgeStoreObjectProjectionSelector", - "SearchIndexerKnowledgeStoreParameters", "SearchIndexerKnowledgeStoreProjection", "SearchIndexerKnowledgeStoreProjectionSelector", "SearchIndexerKnowledgeStoreTableProjectionSelector", @@ -474,7 +441,6 @@ "SemanticPrioritizedFields", "SemanticSearch", "SentimentSkillV3", - 
"ServiceIndexersRuntime", "ShaperSkill", "ShingleTokenFilter", "SimilarityAlgorithm", @@ -502,7 +468,6 @@ "VectorSearchCompression", "VectorSearchProfile", "VectorSearchVectorizer", - "VisionVectorizeSkill", "WebApiHttpHeaders", "WebApiSkill", "WebApiVectorizer", @@ -531,17 +496,15 @@ "DocumentIntelligenceLayoutSkillOutputFormat", "DocumentIntelligenceLayoutSkillOutputMode", "EdgeNGramTokenFilterSide", + "EntityCategory", + "EntityRecognitionSkillLanguage", "ImageAnalysisSkillLanguage", "ImageDetail", "IndexProjectionMode", - "IndexedSharePointContainerName", "IndexerExecutionEnvironment", "IndexerExecutionStatus", - "IndexerExecutionStatusDetail", - "IndexerPermissionOption", "IndexerResyncOption", "IndexerStatus", - "IndexingMode", "KeyPhraseExtractionSkillLanguage", "KnowledgeBaseModelKind", "KnowledgeSourceContentExtractionMode", @@ -558,19 +521,16 @@ "OcrLineEnding", "OcrSkillLanguage", "PIIDetectionSkillMaskingMode", - "PermissionFilter", "PhoneticEncoder", "RankingOrder", "RegexFlags", "ScoringFunctionAggregation", "ScoringFunctionInterpolation", "SearchFieldDataType", - "SearchIndexPermissionFilterOption", "SearchIndexerDataSourceType", + "SentimentSkillLanguage", "SnowballTokenFilterLanguage", - "SplitSkillEncoderModelName", "SplitSkillLanguage", - "SplitSkillUnit", "StemmerTokenFilterLanguage", "StopwordsList", "TextSplitMode", diff --git a/sdk/search/azure-search-documents/azure/search/documents/indexes/models/_enums.py b/sdk/search/azure-search-documents/azure/search/documents/indexes/models/_enums.py index 21854d91bec5..194d9f33a598 100644 --- a/sdk/search/azure-search-documents/azure/search/documents/indexes/models/_enums.py +++ b/sdk/search/azure-search-documents/azure/search/documents/indexes/models/_enums.py @@ -41,22 +41,14 @@ class AzureOpenAIModelName(str, Enum, metaclass=CaseInsensitiveEnumMeta): """TextEmbedding3Large model.""" TEXT_EMBEDDING3_SMALL = "text-embedding-3-small" """TextEmbedding3Small model.""" - GPT4_O = "gpt-4o" - """Gpt4o 
model.""" - GPT4_O_MINI = "gpt-4o-mini" - """Gpt4oMini model.""" - GPT41 = "gpt-4.1" - """Gpt41 model.""" - GPT41_MINI = "gpt-4.1-mini" - """Gpt41Mini model.""" - GPT41_NANO = "gpt-4.1-nano" - """Gpt41Nano model.""" - GPT5 = "gpt-5" - """Gpt5 model.""" GPT5_MINI = "gpt-5-mini" """Gpt5Mini model.""" GPT5_NANO = "gpt-5-nano" """Gpt5Nano model.""" + GPT54_MINI = "gpt-5.4-mini" + """Gpt54Mini model.""" + GPT54_NANO = "gpt-5.4-nano" + """Gpt54Nano model.""" class BlobIndexerDataToExtract(str, Enum, metaclass=CaseInsensitiveEnumMeta): @@ -281,6 +273,76 @@ class EdgeNGramTokenFilterSide(str, Enum, metaclass=CaseInsensitiveEnumMeta): """Specifies that the n-gram should be generated from the back of the input.""" +class EntityCategory(str, Enum, metaclass=CaseInsensitiveEnumMeta): + """A string indicating what entity categories to return.""" + + LOCATION = "location" + """Entities describing a physical location.""" + ORGANIZATION = "organization" + """Entities describing an organization.""" + PERSON = "person" + """Entities describing a person.""" + QUANTITY = "quantity" + """Entities describing a quantity.""" + DATETIME = "datetime" + """Entities describing a date and time.""" + URL = "url" + """Entities describing a URL.""" + EMAIL = "email" + """Entities describing an email address.""" + + +class EntityRecognitionSkillLanguage(str, Enum, metaclass=CaseInsensitiveEnumMeta): + """The language codes supported for input text by EntityRecognitionSkill.""" + + AR = "ar" + """Arabic.""" + CS = "cs" + """Czech.""" + ZH_HANS = "zh-Hans" + """Chinese-Simplified.""" + ZH_HANT = "zh-Hant" + """Chinese-Traditional.""" + DA = "da" + """Danish.""" + NL = "nl" + """Dutch.""" + EN = "en" + """English.""" + FI = "fi" + """Finnish.""" + FR = "fr" + """French.""" + DE = "de" + """German.""" + EL = "el" + """Greek.""" + HU = "hu" + """Hungarian.""" + IT = "it" + """Italian.""" + JA = "ja" + """Japanese.""" + KO = "ko" + """Korean.""" + NO = "no" + """Norwegian (Bokmaal).""" + PL = "pl" + 
"""Polish.""" + PT_PT = "pt-PT" + """Portuguese (Portugal).""" + PT_BR = "pt-BR" + """Portuguese (Brazil).""" + RU = "ru" + """Russian.""" + ES = "es" + """Spanish.""" + SV = "sv" + """Swedish.""" + TR = "tr" + """Turkish.""" + + class ImageAnalysisSkillLanguage(str, Enum, metaclass=CaseInsensitiveEnumMeta): """The language codes supported for input by ImageAnalysisSkill.""" @@ -399,17 +461,6 @@ class ImageDetail(str, Enum, metaclass=CaseInsensitiveEnumMeta): """Details recognized as landmarks.""" -class IndexedSharePointContainerName(str, Enum, metaclass=CaseInsensitiveEnumMeta): - """Specifies which SharePoint libraries to access.""" - - DEFAULT_SITE_LIBRARY = "defaultSiteLibrary" - """Index content from the site's default document library.""" - ALL_SITE_LIBRARIES = "allSiteLibraries" - """Index content from every document library in the site.""" - USE_QUERY = "useQuery" - """Use a query to filter SharePoint content.""" - - class IndexerExecutionEnvironment(str, Enum, metaclass=CaseInsensitiveEnumMeta): """Specifies the environment in which the indexer should execute.""" @@ -436,26 +487,6 @@ class IndexerExecutionStatus(str, Enum, metaclass=CaseInsensitiveEnumMeta): """Indexer has been reset.""" -class IndexerExecutionStatusDetail(str, Enum, metaclass=CaseInsensitiveEnumMeta): - """Details the status of an individual indexer execution.""" - - RESET_DOCS = "resetDocs" - """Indicates that the reset that occurred was for a call to ResetDocs.""" - RESYNC = "resync" - """Indicates to selectively resync based on option(s) from data source.""" - - -class IndexerPermissionOption(str, Enum, metaclass=CaseInsensitiveEnumMeta): - """Options with various types of permission data to index.""" - - USER_IDS = "userIds" - """Indexer to ingest ACL userIds from data source to index.""" - GROUP_IDS = "groupIds" - """Indexer to ingest ACL groupIds from data source to index.""" - RBAC_SCOPE = "rbacScope" - """Indexer to ingest Azure RBAC scope from data source to index.""" - - class 
IndexerResyncOption(str, Enum, metaclass=CaseInsensitiveEnumMeta): """Options with various types of permission data to index.""" @@ -475,18 +506,6 @@ class IndexerStatus(str, Enum, metaclass=CaseInsensitiveEnumMeta): """Indicates that the indexer is running normally.""" -class IndexingMode(str, Enum, metaclass=CaseInsensitiveEnumMeta): - """Represents the mode the indexer is executing in.""" - - INDEXING_ALL_DOCS = "indexingAllDocs" - """The indexer is indexing all documents in the datasource.""" - INDEXING_RESET_DOCS = "indexingResetDocs" - """The indexer is indexing selective, reset documents in the datasource. The documents being - indexed are defined on indexer status.""" - INDEXING_RESYNC = "indexingResync" - """The indexer is resyncing and indexing selective option(s) from the datasource.""" - - class IndexProjectionMode(str, Enum, metaclass=CaseInsensitiveEnumMeta): """Defines behavior of the index projections in relation to the rest of the indexer.""" @@ -568,14 +587,10 @@ class KnowledgeSourceKind(str, Enum, metaclass=CaseInsensitiveEnumMeta): """A knowledge source that reads data from a Search Index.""" AZURE_BLOB = "azureBlob" """A knowledge source that read and ingest data from Azure Blob Storage to a Search Index.""" - INDEXED_SHARE_POINT = "indexedSharePoint" - """A knowledge source that reads data from indexed SharePoint.""" INDEXED_ONE_LAKE = "indexedOneLake" """A knowledge source that reads data from indexed OneLake.""" WEB = "web" """A knowledge source that reads data from the web.""" - REMOTE_SHARE_POINT = "remoteSharePoint" - """A knowledge source that reads data from remote SharePoint.""" class KnowledgeSourceSynchronizationStatus(str, Enum, metaclass=CaseInsensitiveEnumMeta): @@ -1451,17 +1466,6 @@ class OcrSkillLanguage(str, Enum, metaclass=CaseInsensitiveEnumMeta): """Unknown (All).""" -class PermissionFilter(str, Enum, metaclass=CaseInsensitiveEnumMeta): - """A value indicating whether the field should be used as a permission filter.""" - - 
USER_IDS = "userIds" - """Field represents user IDs that should be used to filter document access on queries.""" - GROUP_IDS = "groupIds" - """Field represents group IDs that should be used to filter document access on queries.""" - RBAC_SCOPE = "rbacScope" - """Field represents an RBAC scope that should be used to filter document access on queries.""" - - class PhoneticEncoder(str, Enum, metaclass=CaseInsensitiveEnumMeta): """Identifies the type of phonetic encoder to use with a PhoneticTokenFilter.""" @@ -1629,13 +1633,39 @@ class SearchIndexerDataSourceType(str, Enum, metaclass=CaseInsensitiveEnumMeta): """Indicates a SharePoint datasource.""" -class SearchIndexPermissionFilterOption(str, Enum, metaclass=CaseInsensitiveEnumMeta): - """A value indicating whether permission filtering is enabled for the index.""" +class SentimentSkillLanguage(str, Enum, metaclass=CaseInsensitiveEnumMeta): + """The language codes supported for input text by SentimentSkill.""" - ENABLED = "enabled" - """enabled.""" - DISABLED = "disabled" - """disabled.""" + DA = "da" + """Danish.""" + NL = "nl" + """Dutch.""" + EN = "en" + """English.""" + FI = "fi" + """Finnish.""" + FR = "fr" + """French.""" + DE = "de" + """German.""" + EL = "el" + """Greek.""" + IT = "it" + """Italian.""" + NO = "no" + """Norwegian (Bokmaal).""" + PL = "pl" + """Polish.""" + PT_PT = "pt-PT" + """Portuguese (Portugal).""" + RU = "ru" + """Russian.""" + ES = "es" + """Spanish.""" + SV = "sv" + """Swedish.""" + TR = "tr" + """Turkish.""" class SnowballTokenFilterLanguage(str, Enum, metaclass=CaseInsensitiveEnumMeta): @@ -1690,21 +1720,6 @@ class SnowballTokenFilterLanguage(str, Enum, metaclass=CaseInsensitiveEnumMeta): """Selects the Lucene Snowball stemming tokenizer for Turkish.""" -class SplitSkillEncoderModelName(str, Enum, metaclass=CaseInsensitiveEnumMeta): - """A value indicating which tokenizer to use.""" - - R50_K_BASE = "r50k_base" - """Refers to a base model trained with a 50,000 token vocabulary, often 
used in general natural - language processing tasks.""" - P50_K_BASE = "p50k_base" - """A base model with a 50,000 token vocabulary, optimized for prompt-based tasks.""" - P50_K_EDIT = "p50k_edit" - """Similar to p50k_base but fine-tuned for editing or rephrasing tasks with a 50,000 token - vocabulary.""" - CL100_K_BASE = "cl100k_base" - """A base model with a 100,000 token vocabulary.""" - - class SplitSkillLanguage(str, Enum, metaclass=CaseInsensitiveEnumMeta): """The language codes supported for input text by SplitSkill.""" @@ -1776,15 +1791,6 @@ class SplitSkillLanguage(str, Enum, metaclass=CaseInsensitiveEnumMeta): """Chinese (Simplified).""" -class SplitSkillUnit(str, Enum, metaclass=CaseInsensitiveEnumMeta): - """A value indicating which unit to use.""" - - CHARACTERS = "characters" - """The length will be measured by character.""" - AZURE_OPEN_AI_TOKENS = "azureOpenAITokens" - """The length will be measured by an AzureOpenAI tokenizer from the tiktoken library.""" - - class StemmerTokenFilterLanguage(str, Enum, metaclass=CaseInsensitiveEnumMeta): """The language to use for a stemmer token filter.""" diff --git a/sdk/search/azure-search-documents/azure/search/documents/indexes/models/_models.py b/sdk/search/azure-search-documents/azure/search/documents/indexes/models/_models.py index bcfd8a516667..4683af974c89 100644 --- a/sdk/search/azure-search-documents/azure/search/documents/indexes/models/_models.py +++ b/sdk/search/azure-search-documents/azure/search/documents/indexes/models/_models.py @@ -73,7 +73,8 @@ class AIServicesAccountIdentity(CognitiveServicesAccount, discriminator="#Micros identity is unspecified, the value remains unchanged. If set to "none", the value of this property is cleared. :vartype identity: ~azure.search.documents.indexes.models.SearchIndexerDataIdentity - :ivar subdomain_url: The subdomain url for the corresponding AI Service. Required. 
+ :ivar subdomain_url: The subdomain/Azure AI Services endpoint url for the corresponding AI + Service. Required. :vartype subdomain_url: str :ivar odata_type: A URI fragment specifying the type of Azure AI service resource attached to a skillset. Required. Default value is "#Microsoft.Azure.Search.AIServicesByIdentity". @@ -88,7 +89,7 @@ class AIServicesAccountIdentity(CognitiveServicesAccount, discriminator="#Micros unspecified, the value remains unchanged. If set to \"none\", the value of this property is cleared.""" subdomain_url: str = rest_field(name="subdomainUrl", visibility=["read", "create", "update", "delete", "query"]) - """The subdomain url for the corresponding AI Service. Required.""" + """The subdomain/Azure AI Services endpoint url for the corresponding AI Service. Required.""" odata_type: Literal["#Microsoft.Azure.Search.AIServicesByIdentity"] = rest_discriminator(name="@odata.type", visibility=["read", "create", "update", "delete", "query"]) # type: ignore """A URI fragment specifying the type of Azure AI service resource attached to a skillset. Required. Default value is \"#Microsoft.Azure.Search.AIServicesByIdentity\".""" @@ -123,7 +124,8 @@ class AIServicesAccountKey(CognitiveServicesAccount, discriminator="#Microsoft.A :ivar key: The key used to provision the Azure AI service resource attached to a skillset. Required. :vartype key: str - :ivar subdomain_url: The subdomain url for the corresponding AI Service. Required. + :ivar subdomain_url: The subdomain/Azure AI Services endpoint url for the corresponding AI + Service. Required. :vartype subdomain_url: str :ivar odata_type: A URI fragment specifying the type of Azure AI service resource attached to a skillset. Required. Default value is "#Microsoft.Azure.Search.AIServicesByKey". 
@@ -133,7 +135,7 @@ class AIServicesAccountKey(CognitiveServicesAccount, discriminator="#Microsoft.A key: str = rest_field(visibility=["read", "create", "update", "delete", "query"]) """The key used to provision the Azure AI service resource attached to a skillset. Required.""" subdomain_url: str = rest_field(name="subdomainUrl", visibility=["read", "create", "update", "delete", "query"]) - """The subdomain url for the corresponding AI Service. Required.""" + """The subdomain/Azure AI Services endpoint url for the corresponding AI Service. Required.""" odata_type: Literal["#Microsoft.Azure.Search.AIServicesByKey"] = rest_discriminator(name="@odata.type", visibility=["read", "create", "update", "delete", "query"]) # type: ignore """A URI fragment specifying the type of Azure AI service resource attached to a skillset. Required. Default value is \"#Microsoft.Azure.Search.AIServicesByKey\".""" @@ -159,145 +161,6 @@ def __init__(self, *args: Any, **kwargs: Any) -> None: self.odata_type = "#Microsoft.Azure.Search.AIServicesByKey" # type: ignore -class AIServicesVisionParameters(_Model): - """Specifies the AI Services Vision parameters for vectorizing a query image or text. - - :ivar model_version: The version of the model to use when calling the AI Services Vision - service. It will default to the latest available when not specified. Required. - :vartype model_version: str - :ivar resource_uri: The resource URI of the AI Services resource. Required. - :vartype resource_uri: str - :ivar api_key: API key of the designated AI Services resource. - :vartype api_key: str - :ivar auth_identity: The user-assigned managed identity used for outbound connections. If an - authResourceId is provided and it's not specified, the system-assigned managed identity is - used. On updates to the index, if the identity is unspecified, the value remains unchanged. If - set to "none", the value of this property is cleared. 
- :vartype auth_identity: ~azure.search.documents.indexes.models.SearchIndexerDataIdentity - """ - - model_version: str = rest_field(name="modelVersion", visibility=["read", "create", "update", "delete", "query"]) - """The version of the model to use when calling the AI Services Vision service. It will default to - the latest available when not specified. Required.""" - resource_uri: str = rest_field(name="resourceUri", visibility=["read", "create", "update", "delete", "query"]) - """The resource URI of the AI Services resource. Required.""" - api_key: Optional[str] = rest_field(name="apiKey", visibility=["read", "create", "update", "delete", "query"]) - """API key of the designated AI Services resource.""" - auth_identity: Optional["_models.SearchIndexerDataIdentity"] = rest_field( - name="authIdentity", visibility=["read", "create", "update", "delete", "query"] - ) - """The user-assigned managed identity used for outbound connections. If an authResourceId is - provided and it's not specified, the system-assigned managed identity is used. On updates to - the index, if the identity is unspecified, the value remains unchanged. If set to \"none\", the - value of this property is cleared.""" - - @overload - def __init__( - self, - *, - model_version: str, - resource_uri: str, - api_key: Optional[str] = None, - auth_identity: Optional["_models.SearchIndexerDataIdentity"] = None, - ) -> None: ... - - @overload - def __init__(self, mapping: Mapping[str, Any]) -> None: - """ - :param mapping: raw JSON to initialize the model. - :type mapping: Mapping[str, Any] - """ - - def __init__(self, *args: Any, **kwargs: Any) -> None: - super().__init__(*args, **kwargs) - - -class VectorSearchVectorizer(_Model): - """Specifies the vectorization method to be used during query time. - - You probably want to use the sub-classes and not this class directly. 
Known sub-classes are: - AIServicesVisionVectorizer, AzureMachineLearningVectorizer, AzureOpenAIVectorizer, - WebApiVectorizer - - :ivar vectorizer_name: The name to associate with this particular vectorization method. - Required. - :vartype vectorizer_name: str - :ivar kind: Type of VectorSearchVectorizer. Required. Known values are: "azureOpenAI", - "customWebApi", "aiServicesVision", and "aml". - :vartype kind: str or ~azure.search.documents.indexes.models.VectorSearchVectorizerKind - """ - - __mapping__: dict[str, _Model] = {} - vectorizer_name: str = rest_field(name="name", visibility=["read", "create", "update", "delete", "query"]) - """The name to associate with this particular vectorization method. Required.""" - kind: str = rest_discriminator(name="kind", visibility=["read", "create", "update", "delete", "query"]) - """Type of VectorSearchVectorizer. Required. Known values are: \"azureOpenAI\", \"customWebApi\", - \"aiServicesVision\", and \"aml\".""" - - @overload - def __init__( - self, - *, - vectorizer_name: str, - kind: str, - ) -> None: ... - - @overload - def __init__(self, mapping: Mapping[str, Any]) -> None: - """ - :param mapping: raw JSON to initialize the model. - :type mapping: Mapping[str, Any] - """ - - def __init__(self, *args: Any, **kwargs: Any) -> None: - super().__init__(*args, **kwargs) - - -class AIServicesVisionVectorizer(VectorSearchVectorizer, discriminator="aiServicesVision"): - """Clears the identity property of a datasource. - - :ivar vectorizer_name: The name to associate with this particular vectorization method. - Required. - :vartype vectorizer_name: str - :ivar ai_services_vision_parameters: Contains the parameters specific to AI Services Vision - embedding vectorization. - :vartype ai_services_vision_parameters: - ~azure.search.documents.indexes.models.AIServicesVisionParameters - :ivar kind: The name of the kind of vectorization method being configured for use with vector - search. Required. 
Generate embeddings for an image or text input at query time using the Azure - AI Services Vision Vectorize API. - :vartype kind: str or ~azure.search.documents.indexes.models.AI_SERVICES_VISION - """ - - ai_services_vision_parameters: Optional["_models.AIServicesVisionParameters"] = rest_field( - name="aiServicesVisionParameters", visibility=["read", "create", "update", "delete", "query"] - ) - """Contains the parameters specific to AI Services Vision embedding vectorization.""" - kind: Literal[VectorSearchVectorizerKind.AI_SERVICES_VISION] = rest_discriminator(name="kind", visibility=["read", "create", "update", "delete", "query"]) # type: ignore - """The name of the kind of vectorization method being configured for use with vector search. - Required. Generate embeddings for an image or text input at query time using the Azure AI - Services Vision Vectorize API.""" - - @overload - def __init__( - self, - *, - vectorizer_name: str, - ai_services_vision_parameters: Optional["_models.AIServicesVisionParameters"] = None, - ) -> None: ... - - @overload - def __init__(self, mapping: Mapping[str, Any]) -> None: - """ - :param mapping: raw JSON to initialize the model. - :type mapping: Mapping[str, Any] - """ - - def __init__(self, *args: Any, **kwargs: Any) -> None: - super().__init__(*args, **kwargs) - self.kind = VectorSearchVectorizerKind.AI_SERVICES_VISION # type: ignore - - class AnalyzedTokenInfo(_Model): """Information about a token returned by an analyzer. @@ -608,15 +471,15 @@ class KnowledgeSource(_Model): """Represents a knowledge source definition. You probably want to use the sub-classes and not this class directly. 
Known sub-classes are: - AzureBlobKnowledgeSource, IndexedOneLakeKnowledgeSource, IndexedSharePointKnowledgeSource, - RemoteSharePointKnowledgeSource, SearchIndexKnowledgeSource, WebKnowledgeSource + AzureBlobKnowledgeSource, IndexedOneLakeKnowledgeSource, SearchIndexKnowledgeSource, + WebKnowledgeSource :ivar name: The name of the knowledge source. Required. :vartype name: str :ivar description: Optional user-defined description. :vartype description: str :ivar kind: The type of the knowledge source. Required. Known values are: "searchIndex", - "azureBlob", "indexedSharePoint", "indexedOneLake", "web", and "remoteSharePoint". + "azureBlob", "indexedOneLake", and "web". :vartype kind: str or ~azure.search.documents.indexes.models.KnowledgeSourceKind :ivar e_tag: The ETag of the knowledge source. :vartype e_tag: str @@ -638,7 +501,7 @@ class KnowledgeSource(_Model): """Optional user-defined description.""" kind: str = rest_discriminator(name="kind", visibility=["read", "create", "update", "delete", "query"]) """The type of the knowledge source. Required. Known values are: \"searchIndex\", \"azureBlob\", - \"indexedSharePoint\", \"indexedOneLake\", \"web\", and \"remoteSharePoint\".""" + \"indexedOneLake\", and \"web\".""" e_tag: Optional[str] = rest_field(name="@odata.etag", visibility=["read", "create", "update", "delete", "query"]) """The ETag of the knowledge source.""" encryption_key: Optional["_models.SearchResourceEncryptionKey"] = rest_field( @@ -871,70 +734,33 @@ def __init__(self, *args: Any, **kwargs: Any) -> None: super().__init__(*args, **kwargs) -class SearchIndexerSkill(_Model): - """Base type for skills. +class VectorSearchVectorizer(_Model): + """Specifies the vectorization method to be used during query time. You probably want to use the sub-classes and not this class directly. 
Known sub-classes are: - AzureMachineLearningSkill, ChatCompletionSkill, WebApiSkill, AzureOpenAIEmbeddingSkill, - CustomEntityLookupSkill, KeyPhraseExtractionSkill, LanguageDetectionSkill, MergeSkill, - PIIDetectionSkill, SplitSkill, TextTranslationSkill, EntityLinkingSkill, - EntityRecognitionSkillV3, SentimentSkillV3, ConditionalSkill, ContentUnderstandingSkill, - DocumentExtractionSkill, DocumentIntelligenceLayoutSkill, ShaperSkill, ImageAnalysisSkill, - OcrSkill, VisionVectorizeSkill + AzureMachineLearningVectorizer, AzureOpenAIVectorizer, WebApiVectorizer - :ivar odata_type: The discriminator for derived types. Required. Default value is None. - :vartype odata_type: str - :ivar name: The name of the skill which uniquely identifies it within the skillset. A skill - with no name defined will be given a default name of its 1-based index in the skills array, - prefixed with the character '#'. - :vartype name: str - :ivar description: The description of the skill which describes the inputs, outputs, and usage - of the skill. - :vartype description: str - :ivar context: Represents the level at which operations take place, such as the document root - or document content (for example, /document or /document/content). The default is /document. - :vartype context: str - :ivar inputs: Inputs of the skills could be a column in the source data set, or the output of - an upstream skill. Required. - :vartype inputs: list[~azure.search.documents.indexes.models.InputFieldMappingEntry] - :ivar outputs: The output of a skill is either a field in a search index, or a value that can - be consumed as an input by another skill. Required. - :vartype outputs: list[~azure.search.documents.indexes.models.OutputFieldMappingEntry] + :ivar vectorizer_name: The name to associate with this particular vectorization method. + Required. + :vartype vectorizer_name: str + :ivar kind: Type of VectorSearchVectorizer. Required. 
Known values are: "azureOpenAI", + "customWebApi", "aiServicesVision", and "aml". + :vartype kind: str or ~azure.search.documents.indexes.models.VectorSearchVectorizerKind """ __mapping__: dict[str, _Model] = {} - odata_type: str = rest_discriminator(name="@odata.type", visibility=["read", "create", "update", "delete", "query"]) - """The discriminator for derived types. Required. Default value is None.""" - name: Optional[str] = rest_field(visibility=["read", "create", "update", "delete", "query"]) - """The name of the skill which uniquely identifies it within the skillset. A skill with no name - defined will be given a default name of its 1-based index in the skills array, prefixed with - the character '#'.""" - description: Optional[str] = rest_field(visibility=["read", "create", "update", "delete", "query"]) - """The description of the skill which describes the inputs, outputs, and usage of the skill.""" - context: Optional[str] = rest_field(visibility=["read", "create", "update", "delete", "query"]) - """Represents the level at which operations take place, such as the document root or document - content (for example, /document or /document/content). The default is /document.""" - inputs: list["_models.InputFieldMappingEntry"] = rest_field( - visibility=["read", "create", "update", "delete", "query"] - ) - """Inputs of the skills could be a column in the source data set, or the output of an upstream - skill. Required.""" - outputs: list["_models.OutputFieldMappingEntry"] = rest_field( - visibility=["read", "create", "update", "delete", "query"] - ) - """The output of a skill is either a field in a search index, or a value that can be consumed as - an input by another skill. Required.""" + vectorizer_name: str = rest_field(name="name", visibility=["read", "create", "update", "delete", "query"]) + """The name to associate with this particular vectorization method. 
Required.""" + kind: str = rest_discriminator(name="kind", visibility=["read", "create", "update", "delete", "query"]) + """Type of VectorSearchVectorizer. Required. Known values are: \"azureOpenAI\", \"customWebApi\", + \"aiServicesVision\", and \"aml\".""" @overload def __init__( self, *, - odata_type: str, - inputs: list["_models.InputFieldMappingEntry"], - outputs: list["_models.OutputFieldMappingEntry"], - name: Optional[str] = None, - description: Optional[str] = None, - context: Optional[str] = None, + vectorizer_name: str, + kind: str, ) -> None: ... @overload @@ -948,97 +774,36 @@ def __init__(self, *args: Any, **kwargs: Any) -> None: super().__init__(*args, **kwargs) -class AzureMachineLearningSkill(SearchIndexerSkill, discriminator="#Microsoft.Skills.Custom.AmlSkill"): - """The AML skill allows you to extend AI enrichment with a custom Azure Machine Learning (AML) - model. Once an AML model is trained and deployed, an AML skill integrates it into AI - enrichment. +class AzureMachineLearningVectorizer(VectorSearchVectorizer, discriminator="aml"): + """Specifies an Azure Machine Learning endpoint deployed via the Azure AI Foundry Model Catalog + for generating the vector embedding of a query string. - :ivar name: The name of the skill which uniquely identifies it within the skillset. A skill - with no name defined will be given a default name of its 1-based index in the skills array, - prefixed with the character '#'. - :vartype name: str - :ivar description: The description of the skill which describes the inputs, outputs, and usage - of the skill. - :vartype description: str - :ivar context: Represents the level at which operations take place, such as the document root - or document content (for example, /document or /document/content). The default is /document. - :vartype context: str - :ivar inputs: Inputs of the skills could be a column in the source data set, or the output of - an upstream skill. Required. 
- :vartype inputs: list[~azure.search.documents.indexes.models.InputFieldMappingEntry] - :ivar outputs: The output of a skill is either a field in a search index, or a value that can - be consumed as an input by another skill. Required. - :vartype outputs: list[~azure.search.documents.indexes.models.OutputFieldMappingEntry] - :ivar scoring_uri: (Required for no authentication or key authentication) The scoring URI of - the AML service to which the JSON payload will be sent. Only the https URI scheme is allowed. - :vartype scoring_uri: str - :ivar authentication_key: (Required for key authentication) The key for the AML service. - :vartype authentication_key: str - :ivar resource_id: (Required for token authentication). The Azure Resource Manager resource ID - of the AML service. It should be in the format - subscriptions/{guid}/resourceGroups/{resource-group-name}/Microsoft.MachineLearningServices/workspaces/{workspace-name}/services/{service_name}. - :vartype resource_id: str - :ivar timeout: (Optional) When specified, indicates the timeout for the http client making the - API call. - :vartype timeout: ~datetime.timedelta - :ivar region: (Optional for token authentication). The region the AML service is deployed in. - :vartype region: str - :ivar degree_of_parallelism: (Optional) When specified, indicates the number of calls the - indexer will make in parallel to the endpoint you have provided. You can decrease this value if - your endpoint is failing under too high of a request load, or raise it if your endpoint is able - to accept more requests and you would like an increase in the performance of the indexer. If - not set, a default value of 5 is used. The degreeOfParallelism can be set to a maximum of 10 - and a minimum of 1. - :vartype degree_of_parallelism: int - :ivar odata_type: A URI fragment specifying the type of skill. Required. Default value is - "#Microsoft.Skills.Custom.AmlSkill". 
- :vartype odata_type: str + :ivar vectorizer_name: The name to associate with this particular vectorization method. + Required. + :vartype vectorizer_name: str + :ivar aml_parameters: Specifies the properties of the AML vectorizer. + :vartype aml_parameters: ~azure.search.documents.indexes.models.AzureMachineLearningParameters + :ivar kind: The name of the kind of vectorization method being configured for use with vector + search. Required. Generate embeddings using an Azure Machine Learning endpoint deployed via the + Azure AI Foundry Model Catalog at query time. + :vartype kind: str or ~azure.search.documents.indexes.models.AML """ - scoring_uri: Optional[str] = rest_field(name="uri", visibility=["read", "create", "update", "delete", "query"]) - """(Required for no authentication or key authentication) The scoring URI of the AML service to - which the JSON payload will be sent. Only the https URI scheme is allowed.""" - authentication_key: Optional[str] = rest_field( - name="key", visibility=["read", "create", "update", "delete", "query"] - ) - """(Required for key authentication) The key for the AML service.""" - resource_id: Optional[str] = rest_field( - name="resourceId", visibility=["read", "create", "update", "delete", "query"] - ) - """(Required for token authentication). The Azure Resource Manager resource ID of the AML service. - It should be in the format - subscriptions/{guid}/resourceGroups/{resource-group-name}/Microsoft.MachineLearningServices/workspaces/{workspace-name}/services/{service_name}.""" - timeout: Optional[datetime.timedelta] = rest_field(visibility=["read", "create", "update", "delete", "query"]) - """(Optional) When specified, indicates the timeout for the http client making the API call.""" - region: Optional[str] = rest_field(visibility=["read", "create", "update", "delete", "query"]) - """(Optional for token authentication). 
The region the AML service is deployed in.""" - degree_of_parallelism: Optional[int] = rest_field( - name="degreeOfParallelism", visibility=["read", "create", "update", "delete", "query"] + aml_parameters: Optional["_models.AzureMachineLearningParameters"] = rest_field( + name="amlParameters", visibility=["read", "create", "update", "delete", "query"] ) - """(Optional) When specified, indicates the number of calls the indexer will make in parallel to - the endpoint you have provided. You can decrease this value if your endpoint is failing under - too high of a request load, or raise it if your endpoint is able to accept more requests and - you would like an increase in the performance of the indexer. If not set, a default value of 5 - is used. The degreeOfParallelism can be set to a maximum of 10 and a minimum of 1.""" - odata_type: Literal["#Microsoft.Skills.Custom.AmlSkill"] = rest_discriminator(name="@odata.type", visibility=["read", "create", "update", "delete", "query"]) # type: ignore - """A URI fragment specifying the type of skill. Required. Default value is - \"#Microsoft.Skills.Custom.AmlSkill\".""" + """Specifies the properties of the AML vectorizer.""" + kind: Literal[VectorSearchVectorizerKind.AML] = rest_discriminator(name="kind", visibility=["read", "create", "update", "delete", "query"]) # type: ignore + """The name of the kind of vectorization method being configured for use with vector search. + Required. 
Generate embeddings using an Azure Machine Learning endpoint deployed via the Azure + AI Foundry Model Catalog at query time.""" @overload def __init__( self, *, - inputs: list["_models.InputFieldMappingEntry"], - outputs: list["_models.OutputFieldMappingEntry"], - name: Optional[str] = None, - description: Optional[str] = None, - context: Optional[str] = None, - scoring_uri: Optional[str] = None, - authentication_key: Optional[str] = None, - resource_id: Optional[str] = None, - timeout: Optional[datetime.timedelta] = None, - region: Optional[str] = None, - degree_of_parallelism: Optional[int] = None, + vectorizer_name: str, + aml_parameters: Optional["_models.AzureMachineLearningParameters"] = None, ) -> None: ... @overload @@ -1050,39 +815,72 @@ def __init__(self, mapping: Mapping[str, Any]) -> None: def __init__(self, *args: Any, **kwargs: Any) -> None: super().__init__(*args, **kwargs) - self.odata_type = "#Microsoft.Skills.Custom.AmlSkill" # type: ignore + self.kind = VectorSearchVectorizerKind.AML # type: ignore -class AzureMachineLearningVectorizer(VectorSearchVectorizer, discriminator="aml"): - """Specifies an Azure Machine Learning endpoint deployed via the Azure AI Foundry Model Catalog - for generating the vector embedding of a query string. +class SearchIndexerSkill(_Model): + """Base type for skills. - :ivar vectorizer_name: The name to associate with this particular vectorization method. - Required. - :vartype vectorizer_name: str - :ivar aml_parameters: Specifies the properties of the AML vectorizer. - :vartype aml_parameters: ~azure.search.documents.indexes.models.AzureMachineLearningParameters - :ivar kind: The name of the kind of vectorization method being configured for use with vector - search. Required. Generate embeddings using an Azure Machine Learning endpoint deployed via the - Azure AI Foundry Model Catalog at query time. 
- :vartype kind: str or ~azure.search.documents.indexes.models.AML + You probably want to use the sub-classes and not this class directly. Known sub-classes are: + ChatCompletionSkill, WebApiSkill, AzureOpenAIEmbeddingSkill, CustomEntityLookupSkill, + KeyPhraseExtractionSkill, LanguageDetectionSkill, MergeSkill, PIIDetectionSkill, SplitSkill, + TextTranslationSkill, EntityLinkingSkill, EntityRecognitionSkillV3, SentimentSkillV3, + ConditionalSkill, ContentUnderstandingSkill, DocumentExtractionSkill, + DocumentIntelligenceLayoutSkill, ShaperSkill, ImageAnalysisSkill, OcrSkill + + :ivar odata_type: The discriminator for derived types. Required. Default value is None. + :vartype odata_type: str + :ivar name: The name of the skill which uniquely identifies it within the skillset. A skill + with no name defined will be given a default name of its 1-based index in the skills array, + prefixed with the character '#'. + :vartype name: str + :ivar description: The description of the skill which describes the inputs, outputs, and usage + of the skill. + :vartype description: str + :ivar context: Represents the level at which operations take place, such as the document root + or document content (for example, /document or /document/content). The default is /document. + :vartype context: str + :ivar inputs: Inputs of the skills could be a column in the source data set, or the output of + an upstream skill. Required. + :vartype inputs: list[~azure.search.documents.indexes.models.InputFieldMappingEntry] + :ivar outputs: The output of a skill is either a field in a search index, or a value that can + be consumed as an input by another skill. Required. 
+ :vartype outputs: list[~azure.search.documents.indexes.models.OutputFieldMappingEntry] """ - aml_parameters: Optional["_models.AzureMachineLearningParameters"] = rest_field( - name="amlParameters", visibility=["read", "create", "update", "delete", "query"] + __mapping__: dict[str, _Model] = {} + odata_type: str = rest_discriminator(name="@odata.type", visibility=["read", "create", "update", "delete", "query"]) + """The discriminator for derived types. Required. Default value is None.""" + name: Optional[str] = rest_field(visibility=["read", "create", "update", "delete", "query"]) + """The name of the skill which uniquely identifies it within the skillset. A skill with no name + defined will be given a default name of its 1-based index in the skills array, prefixed with + the character '#'.""" + description: Optional[str] = rest_field(visibility=["read", "create", "update", "delete", "query"]) + """The description of the skill which describes the inputs, outputs, and usage of the skill.""" + context: Optional[str] = rest_field(visibility=["read", "create", "update", "delete", "query"]) + """Represents the level at which operations take place, such as the document root or document + content (for example, /document or /document/content). The default is /document.""" + inputs: list["_models.InputFieldMappingEntry"] = rest_field( + visibility=["read", "create", "update", "delete", "query"] ) - """Specifies the properties of the AML vectorizer.""" - kind: Literal[VectorSearchVectorizerKind.AML] = rest_discriminator(name="kind", visibility=["read", "create", "update", "delete", "query"]) # type: ignore - """The name of the kind of vectorization method being configured for use with vector search. - Required. Generate embeddings using an Azure Machine Learning endpoint deployed via the Azure - AI Foundry Model Catalog at query time.""" + """Inputs of the skills could be a column in the source data set, or the output of an upstream + skill. 
Required.""" + outputs: list["_models.OutputFieldMappingEntry"] = rest_field( + visibility=["read", "create", "update", "delete", "query"] + ) + """The output of a skill is either a field in a search index, or a value that can be consumed as + an input by another skill. Required.""" @overload def __init__( self, *, - vectorizer_name: str, - aml_parameters: Optional["_models.AzureMachineLearningParameters"] = None, + odata_type: str, + inputs: list["_models.InputFieldMappingEntry"], + outputs: list["_models.OutputFieldMappingEntry"], + name: Optional[str] = None, + description: Optional[str] = None, + context: Optional[str] = None, ) -> None: ... @overload @@ -1094,7 +892,6 @@ def __init__(self, mapping: Mapping[str, Any]) -> None: def __init__(self, *args: Any, **kwargs: Any) -> None: super().__init__(*args, **kwargs) - self.kind = VectorSearchVectorizerKind.AML # type: ignore class AzureOpenAIEmbeddingSkill(SearchIndexerSkill, discriminator="#Microsoft.Skills.Text.AzureOpenAIEmbeddingSkill"): @@ -1127,8 +924,7 @@ class AzureOpenAIEmbeddingSkill(SearchIndexerSkill, discriminator="#Microsoft.Sk :vartype auth_identity: ~azure.search.documents.indexes.models.SearchIndexerDataIdentity :ivar model_name: The name of the embedding model that is deployed at the provided deploymentId path. Known values are: "text-embedding-ada-002", "text-embedding-3-large", - "text-embedding-3-small", "gpt-4o", "gpt-4o-mini", "gpt-4.1", "gpt-4.1-mini", "gpt-4.1-nano", - "gpt-5", "gpt-5-mini", and "gpt-5-nano". + "text-embedding-3-small", "gpt-5-mini", "gpt-5-nano", "gpt-5.4-mini", and "gpt-5.4-nano". :vartype model_name: str or ~azure.search.documents.indexes.models.AzureOpenAIModelName :ivar dimensions: The number of dimensions the resulting output embeddings should have. Only supported in text-embedding-3 and later models. 
@@ -1157,8 +953,7 @@ class AzureOpenAIEmbeddingSkill(SearchIndexerSkill, discriminator="#Microsoft.Sk ) """The name of the embedding model that is deployed at the provided deploymentId path. Known values are: \"text-embedding-ada-002\", \"text-embedding-3-large\", \"text-embedding-3-small\", - \"gpt-4o\", \"gpt-4o-mini\", \"gpt-4.1\", \"gpt-4.1-mini\", \"gpt-4.1-nano\", \"gpt-5\", - \"gpt-5-mini\", and \"gpt-5-nano\".""" + \"gpt-5-mini\", \"gpt-5-nano\", \"gpt-5.4-mini\", and \"gpt-5.4-nano\".""" dimensions: Optional[int] = rest_field(visibility=["read", "create", "update", "delete", "query"]) """The number of dimensions the resulting output embeddings should have. Only supported in text-embedding-3 and later models.""" @@ -1195,51 +990,6 @@ def __init__(self, *args: Any, **kwargs: Any) -> None: self.odata_type = "#Microsoft.Skills.Text.AzureOpenAIEmbeddingSkill" # type: ignore -class AzureOpenAITokenizerParameters(_Model): - """Azure OpenAI Tokenizer parameters. - - :ivar encoder_model_name: Only applies if the unit is set to azureOpenAITokens. Options include - 'R50k_base', 'P50k_base', 'P50k_edit' and 'CL100k_base'. The default value is 'CL100k_base'. - Known values are: "r50k_base", "p50k_base", "p50k_edit", and "cl100k_base". - :vartype encoder_model_name: str or - ~azure.search.documents.indexes.models.SplitSkillEncoderModelName - :ivar allowed_special_tokens: (Optional) Only applies if the unit is set to azureOpenAITokens. - This parameter defines a collection of special tokens that are permitted within the - tokenization process. - :vartype allowed_special_tokens: list[str] - """ - - encoder_model_name: Optional[Union[str, "_models.SplitSkillEncoderModelName"]] = rest_field( - name="encoderModelName", visibility=["read", "create", "update", "delete", "query"] - ) - """Only applies if the unit is set to azureOpenAITokens. Options include 'R50k_base', 'P50k_base', - 'P50k_edit' and 'CL100k_base'. The default value is 'CL100k_base'. 
Known values are: - \"r50k_base\", \"p50k_base\", \"p50k_edit\", and \"cl100k_base\".""" - allowed_special_tokens: Optional[list[str]] = rest_field( - name="allowedSpecialTokens", visibility=["read", "create", "update", "delete", "query"] - ) - """(Optional) Only applies if the unit is set to azureOpenAITokens. This parameter defines a - collection of special tokens that are permitted within the tokenization process.""" - - @overload - def __init__( - self, - *, - encoder_model_name: Optional[Union[str, "_models.SplitSkillEncoderModelName"]] = None, - allowed_special_tokens: Optional[list[str]] = None, - ) -> None: ... - - @overload - def __init__(self, mapping: Mapping[str, Any]) -> None: - """ - :param mapping: raw JSON to initialize the model. - :type mapping: Mapping[str, Any] - """ - - def __init__(self, *args: Any, **kwargs: Any) -> None: - super().__init__(*args, **kwargs) - - class AzureOpenAIVectorizer(VectorSearchVectorizer, discriminator="azureOpenAI"): """Specifies the Azure OpenAI resource used to vectorize a query string. @@ -1294,8 +1044,7 @@ class AzureOpenAIVectorizerParameters(_Model): :vartype auth_identity: ~azure.search.documents.indexes.models.SearchIndexerDataIdentity :ivar model_name: The name of the embedding model that is deployed at the provided deploymentId path. Known values are: "text-embedding-ada-002", "text-embedding-3-large", - "text-embedding-3-small", "gpt-4o", "gpt-4o-mini", "gpt-4.1", "gpt-4.1-mini", "gpt-4.1-nano", - "gpt-5", "gpt-5-mini", and "gpt-5-nano". + "text-embedding-3-small", "gpt-5-mini", "gpt-5-nano", "gpt-5.4-mini", and "gpt-5.4-nano". :vartype model_name: str or ~azure.search.documents.indexes.models.AzureOpenAIModelName """ @@ -1318,8 +1067,7 @@ class AzureOpenAIVectorizerParameters(_Model): ) """The name of the embedding model that is deployed at the provided deploymentId path. 
Known values are: \"text-embedding-ada-002\", \"text-embedding-3-large\", \"text-embedding-3-small\", - \"gpt-4o\", \"gpt-4o-mini\", \"gpt-4.1\", \"gpt-4.1-mini\", \"gpt-4.1-nano\", \"gpt-5\", - \"gpt-5-mini\", and \"gpt-5-nano\".""" + \"gpt-5-mini\", \"gpt-5-nano\", \"gpt-5.4-mini\", and \"gpt-5.4-nano\".""" @overload def __init__( @@ -1809,24 +1557,6 @@ class ChatCompletionSkill(SearchIndexerSkill, discriminator="#Microsoft.Skills.C :vartype outputs: list[~azure.search.documents.indexes.models.OutputFieldMappingEntry] :ivar uri: The url for the Web API. Required. :vartype uri: str - :ivar http_headers: The headers required to make the http request. - :vartype http_headers: ~azure.search.documents.indexes.models.WebApiHttpHeaders - :ivar http_method: The method for the http request. - :vartype http_method: str - :ivar timeout: The desired timeout for the request. Default is 30 seconds. - :vartype timeout: ~datetime.timedelta - :ivar batch_size: The desired batch size which indicates number of documents. - :vartype batch_size: int - :ivar degree_of_parallelism: If set, the number of parallel calls that can be made to the Web - API. - :vartype degree_of_parallelism: int - :ivar auth_resource_id: Applies to custom skills that connect to external code in an Azure - function or some other application that provides the transformations. This value should be the - application ID created for the function or app when it was registered with Azure Active - Directory. When specified, the custom skill connects to the function or app using a managed ID - (either system or user-assigned) of the search service and the access token of the function or - app, using this value as the resource id for creating the scope of the access token. - :vartype auth_resource_id: str :ivar auth_identity: The user-assigned managed identity used for outbound connections. If an authResourceId is provided and it's not specified, the system-assigned managed identity is used. 
On updates to the indexer, if the identity is unspecified, the value remains unchanged. @@ -1856,31 +1586,6 @@ class ChatCompletionSkill(SearchIndexerSkill, discriminator="#Microsoft.Skills.C uri: str = rest_field(visibility=["read", "create", "update", "delete", "query"]) """The url for the Web API. Required.""" - http_headers: Optional["_models.WebApiHttpHeaders"] = rest_field( - name="httpHeaders", visibility=["read", "create", "update", "delete", "query"] - ) - """The headers required to make the http request.""" - http_method: Optional[str] = rest_field( - name="httpMethod", visibility=["read", "create", "update", "delete", "query"] - ) - """The method for the http request.""" - timeout: Optional[datetime.timedelta] = rest_field(visibility=["read", "create", "update", "delete", "query"]) - """The desired timeout for the request. Default is 30 seconds.""" - batch_size: Optional[int] = rest_field(name="batchSize", visibility=["read", "create", "update", "delete", "query"]) - """The desired batch size which indicates number of documents.""" - degree_of_parallelism: Optional[int] = rest_field( - name="degreeOfParallelism", visibility=["read", "create", "update", "delete", "query"] - ) - """If set, the number of parallel calls that can be made to the Web API.""" - auth_resource_id: Optional[str] = rest_field( - name="authResourceId", visibility=["read", "create", "update", "delete", "query"] - ) - """Applies to custom skills that connect to external code in an Azure function or some other - application that provides the transformations. This value should be the application ID created - for the function or app when it was registered with Azure Active Directory. 
When specified, the - custom skill connects to the function or app using a managed ID (either system or - user-assigned) of the search service and the access token of the function or app, using this - value as the resource id for creating the scope of the access token.""" auth_identity: Optional["_models.SearchIndexerDataIdentity"] = rest_field( name="authIdentity", visibility=["read", "create", "update", "delete", "query"] ) @@ -1924,12 +1629,6 @@ def __init__( name: Optional[str] = None, description: Optional[str] = None, context: Optional[str] = None, - http_headers: Optional["_models.WebApiHttpHeaders"] = None, - http_method: Optional[str] = None, - timeout: Optional[datetime.timedelta] = None, - batch_size: Optional[int] = None, - degree_of_parallelism: Optional[int] = None, - auth_resource_id: Optional[str] = None, auth_identity: Optional["_models.SearchIndexerDataIdentity"] = None, api_key: Optional[str] = None, common_model_parameters: Optional["_models.ChatCompletionCommonModelParameters"] = None, @@ -3870,9 +3569,12 @@ class EntityRecognitionSkillV3(SearchIndexerSkill, discriminator="#Microsoft.Ski be consumed as an input by another skill. Required. :vartype outputs: list[~azure.search.documents.indexes.models.OutputFieldMappingEntry] :ivar categories: A list of entity categories that should be extracted. - :vartype categories: list[str] + :vartype categories: list[str or ~azure.search.documents.indexes.models.EntityCategory] :ivar default_language_code: A value indicating which language code to use. Default is ``en``. - :vartype default_language_code: str + Known values are: "ar", "cs", "zh-Hans", "zh-Hant", "da", "nl", "en", "fi", "fr", "de", "el", + "hu", "it", "ja", "ko", "no", "pl", "pt-PT", "pt-BR", "ru", "es", "sv", and "tr". 
+ :vartype default_language_code: str or + ~azure.search.documents.indexes.models.EntityRecognitionSkillLanguage :ivar minimum_precision: A value between 0 and 1 that be used to only include entities whose confidence score is greater than the value specified. If not set (default), or if explicitly set to null, all entities will be included. @@ -3886,12 +3588,17 @@ class EntityRecognitionSkillV3(SearchIndexerSkill, discriminator="#Microsoft.Ski :vartype odata_type: str """ - categories: Optional[list[str]] = rest_field(visibility=["read", "create", "update", "delete", "query"]) + categories: Optional[list[Union[str, "_models.EntityCategory"]]] = rest_field( + visibility=["read", "create", "update", "delete", "query"] + ) """A list of entity categories that should be extracted.""" - default_language_code: Optional[str] = rest_field( + default_language_code: Optional[Union[str, "_models.EntityRecognitionSkillLanguage"]] = rest_field( name="defaultLanguageCode", visibility=["read", "create", "update", "delete", "query"] ) - """A value indicating which language code to use. Default is ``en``.""" + """A value indicating which language code to use. Default is ``en``. 
Known values are: \"ar\", + \"cs\", \"zh-Hans\", \"zh-Hant\", \"da\", \"nl\", \"en\", \"fi\", \"fr\", \"de\", \"el\", + \"hu\", \"it\", \"ja\", \"ko\", \"no\", \"pl\", \"pt-PT\", \"pt-BR\", \"ru\", \"es\", \"sv\", + and \"tr\".""" minimum_precision: Optional[float] = rest_field( name="minimumPrecision", visibility=["read", "create", "update", "delete", "query"] ) @@ -3917,8 +3624,8 @@ def __init__( name: Optional[str] = None, description: Optional[str] = None, context: Optional[str] = None, - categories: Optional[list[str]] = None, - default_language_code: Optional[str] = None, + categories: Optional[list[Union[str, "_models.EntityCategory"]]] = None, + default_language_code: Optional[Union[str, "_models.EntityRecognitionSkillLanguage"]] = None, minimum_precision: Optional[float] = None, model_version: Optional[str] = None, ) -> None: ... @@ -4569,200 +4276,12 @@ def __init__(self, *args: Any, **kwargs: Any) -> None: super().__init__(*args, **kwargs) -class IndexedSharePointKnowledgeSource(KnowledgeSource, discriminator="indexedSharePoint"): - """Configuration for SharePoint knowledge source. - - :ivar name: The name of the knowledge source. Required. - :vartype name: str - :ivar description: Optional user-defined description. - :vartype description: str - :ivar e_tag: The ETag of the knowledge source. - :vartype e_tag: str - :ivar encryption_key: A description of an encryption key that you create in Azure Key Vault. - This key is used to provide an additional level of encryption-at-rest for your knowledge source - definition when you want full assurance that no one, not even Microsoft, can decrypt them. Once - you have encrypted your knowledge source definition, it will always remain encrypted. The - search service will ignore attempts to set this property to null. You can change this property - as needed if you want to rotate your encryption key; Your knowledge source definition will be - unaffected. 
Encryption with customer-managed keys is not available for free search services, - and is only available for paid services created on or after January 1, 2019. - :vartype encryption_key: ~azure.search.documents.indexes.models.SearchResourceEncryptionKey - :ivar kind: Required. A knowledge source that reads data from indexed SharePoint. - :vartype kind: str or ~azure.search.documents.indexes.models.INDEXED_SHARE_POINT - :ivar indexed_share_point_parameters: The parameters for the knowledge source. Required. - :vartype indexed_share_point_parameters: - ~azure.search.documents.indexes.models.IndexedSharePointKnowledgeSourceParameters - """ - - kind: Literal[KnowledgeSourceKind.INDEXED_SHARE_POINT] = rest_discriminator(name="kind", visibility=["read", "create", "update", "delete", "query"]) # type: ignore - """Required. A knowledge source that reads data from indexed SharePoint.""" - indexed_share_point_parameters: "_models.IndexedSharePointKnowledgeSourceParameters" = rest_field( - name="indexedSharePointParameters", visibility=["read", "create", "update", "delete", "query"] - ) - """The parameters for the knowledge source. Required.""" - - @overload - def __init__( - self, - *, - name: str, - indexed_share_point_parameters: "_models.IndexedSharePointKnowledgeSourceParameters", - description: Optional[str] = None, - e_tag: Optional[str] = None, - encryption_key: Optional["_models.SearchResourceEncryptionKey"] = None, - ) -> None: ... - - @overload - def __init__(self, mapping: Mapping[str, Any]) -> None: - """ - :param mapping: raw JSON to initialize the model. - :type mapping: Mapping[str, Any] - """ - - def __init__(self, *args: Any, **kwargs: Any) -> None: - super().__init__(*args, **kwargs) - self.kind = KnowledgeSourceKind.INDEXED_SHARE_POINT # type: ignore - - -class IndexedSharePointKnowledgeSourceParameters(_Model): # pylint: disable=name-too-long - """Parameters for SharePoint knowledge source. 
- - :ivar connection_string: SharePoint connection string with format: - SharePointOnlineEndpoint=[SharePoint site url];ApplicationId=[Azure AD App - ID];ApplicationSecret=[Azure AD App client secret];TenantId=[SharePoint site tenant id]. - Required. - :vartype connection_string: str - :ivar container_name: Specifies which SharePoint libraries to access. Required. Known values - are: "defaultSiteLibrary", "allSiteLibraries", and "useQuery". - :vartype container_name: str or - ~azure.search.documents.indexes.models.IndexedSharePointContainerName - :ivar query: Optional query to filter SharePoint content. - :vartype query: str - :ivar ingestion_parameters: Consolidates all general ingestion settings. - :vartype ingestion_parameters: - ~azure.search.documents.knowledgebases.models.KnowledgeSourceIngestionParameters - :ivar created_resources: Resources created by the knowledge source. - :vartype created_resources: ~azure.search.documents.indexes.models.CreatedResources - """ - - connection_string: str = rest_field( - name="connectionString", visibility=["read", "create", "update", "delete", "query"] - ) - """SharePoint connection string with format: SharePointOnlineEndpoint=[SharePoint site - url];ApplicationId=[Azure AD App ID];ApplicationSecret=[Azure AD App client - secret];TenantId=[SharePoint site tenant id]. Required.""" - container_name: Union[str, "_models.IndexedSharePointContainerName"] = rest_field( - name="containerName", visibility=["read", "create", "update", "delete", "query"] - ) - """Specifies which SharePoint libraries to access. Required. 
Known values are: - \"defaultSiteLibrary\", \"allSiteLibraries\", and \"useQuery\".""" - query: Optional[str] = rest_field(visibility=["read", "create", "update", "delete", "query"]) - """Optional query to filter SharePoint content.""" - ingestion_parameters: Optional["_knowledgebases_models3.KnowledgeSourceIngestionParameters"] = rest_field( - name="ingestionParameters", visibility=["read", "create", "update", "delete", "query"] - ) - """Consolidates all general ingestion settings.""" - created_resources: Optional["_models.CreatedResources"] = rest_field(name="createdResources", visibility=["read"]) - """Resources created by the knowledge source.""" - - @overload - def __init__( - self, - *, - connection_string: str, - container_name: Union[str, "_models.IndexedSharePointContainerName"], - query: Optional[str] = None, - ingestion_parameters: Optional["_knowledgebases_models3.KnowledgeSourceIngestionParameters"] = None, - ) -> None: ... - - @overload - def __init__(self, mapping: Mapping[str, Any]) -> None: - """ - :param mapping: raw JSON to initialize the model. - :type mapping: Mapping[str, Any] - """ - - def __init__(self, *args: Any, **kwargs: Any) -> None: - super().__init__(*args, **kwargs) - - -class IndexerCurrentState(_Model): - """Represents all of the state that defines and dictates the indexer's current execution. - - :ivar mode: The mode the indexer is running in. Known values are: "indexingAllDocs", - "indexingResetDocs", and "indexingResync". - :vartype mode: str or ~azure.search.documents.indexes.models.IndexingMode - :ivar all_docs_initial_tracking_state: Change tracking state used when indexing starts on all - documents in the datasource. - :vartype all_docs_initial_tracking_state: str - :ivar all_docs_final_tracking_state: Change tracking state value when indexing finishes on all - documents in the datasource. 
- :vartype all_docs_final_tracking_state: str - :ivar reset_docs_initial_tracking_state: Change tracking state used when indexing starts on - select, reset documents in the datasource. - :vartype reset_docs_initial_tracking_state: str - :ivar reset_docs_final_tracking_state: Change tracking state value when indexing finishes on - select, reset documents in the datasource. - :vartype reset_docs_final_tracking_state: str - :ivar resync_initial_tracking_state: Change tracking state used when indexing starts on - selective options from the datasource. - :vartype resync_initial_tracking_state: str - :ivar resync_final_tracking_state: Change tracking state value when indexing finishes on - selective options from the datasource. - :vartype resync_final_tracking_state: str - :ivar reset_document_keys: The list of document keys that have been reset. The document key is - the document's unique identifier for the data in the search index. The indexer will prioritize - selectively re-ingesting these keys. - :vartype reset_document_keys: list[str] - :ivar reset_datasource_document_ids: The list of datasource document ids that have been reset. - The datasource document id is the unique identifier for the data in the datasource. The indexer - will prioritize selectively re-ingesting these ids. - :vartype reset_datasource_document_ids: list[str] - """ - - mode: Optional[Union[str, "_models.IndexingMode"]] = rest_field(visibility=["read"]) - """The mode the indexer is running in. 
Known values are: \"indexingAllDocs\", - \"indexingResetDocs\", and \"indexingResync\".""" - all_docs_initial_tracking_state: Optional[str] = rest_field(name="allDocsInitialTrackingState", visibility=["read"]) - """Change tracking state used when indexing starts on all documents in the datasource.""" - all_docs_final_tracking_state: Optional[str] = rest_field(name="allDocsFinalTrackingState", visibility=["read"]) - """Change tracking state value when indexing finishes on all documents in the datasource.""" - reset_docs_initial_tracking_state: Optional[str] = rest_field( - name="resetDocsInitialTrackingState", visibility=["read"] - ) - """Change tracking state used when indexing starts on select, reset documents in the datasource.""" - reset_docs_final_tracking_state: Optional[str] = rest_field(name="resetDocsFinalTrackingState", visibility=["read"]) - """Change tracking state value when indexing finishes on select, reset documents in the - datasource.""" - resync_initial_tracking_state: Optional[str] = rest_field(name="resyncInitialTrackingState", visibility=["read"]) - """Change tracking state used when indexing starts on selective options from the datasource.""" - resync_final_tracking_state: Optional[str] = rest_field(name="resyncFinalTrackingState", visibility=["read"]) - """Change tracking state value when indexing finishes on selective options from the datasource.""" - reset_document_keys: Optional[list[str]] = rest_field(name="resetDocumentKeys", visibility=["read"]) - """The list of document keys that have been reset. The document key is the document's unique - identifier for the data in the search index. The indexer will prioritize selectively - re-ingesting these keys.""" - reset_datasource_document_ids: Optional[list[str]] = rest_field( - name="resetDatasourceDocumentIds", visibility=["read"] - ) - """The list of datasource document ids that have been reset. The datasource document id is the - unique identifier for the data in the datasource. 
The indexer will prioritize selectively - re-ingesting these ids.""" - - class IndexerExecutionResult(_Model): """Represents the result of an individual indexer execution. :ivar status: The outcome of this indexer execution. Required. Known values are: "transientFailure", "success", "inProgress", and "reset". :vartype status: str or ~azure.search.documents.indexes.models.IndexerExecutionStatus - :ivar status_detail: The outcome of this indexer execution. Known values are: "resetDocs" and - "resync". - :vartype status_detail: str or - ~azure.search.documents.indexes.models.IndexerExecutionStatusDetail - :ivar mode: The mode the indexer is running in. Known values are: "indexingAllDocs", - "indexingResetDocs", and "indexingResync". - :vartype mode: str or ~azure.search.documents.indexes.models.IndexingMode :ivar error_message: The error message indicating the top-level error, if any. :vartype error_message: str :ivar start_time: The start time of this indexer execution. @@ -4789,13 +4308,6 @@ class IndexerExecutionResult(_Model): status: Union[str, "_models.IndexerExecutionStatus"] = rest_field(visibility=["read"]) """The outcome of this indexer execution. Required. Known values are: \"transientFailure\", \"success\", \"inProgress\", and \"reset\".""" - status_detail: Optional[Union[str, "_models.IndexerExecutionStatusDetail"]] = rest_field( - name="statusDetail", visibility=["read"] - ) - """The outcome of this indexer execution. Known values are: \"resetDocs\" and \"resync\".""" - mode: Optional[Union[str, "_models.IndexingMode"]] = rest_field(visibility=["read"]) - """The mode the indexer is running in. 
Known values are: \"indexingAllDocs\", - \"indexingResetDocs\", and \"indexingResync\".""" error_message: Optional[str] = rest_field(name="errorMessage", visibility=["read"]) """The error message indicating the top-level error, if any.""" start_time: Optional[datetime.datetime] = rest_field(name="startTime", visibility=["read"], format="rfc3339") @@ -4847,61 +4359,6 @@ def __init__(self, *args: Any, **kwargs: Any) -> None: super().__init__(*args, **kwargs) -class IndexerRuntime(_Model): - """Represents the indexer's cumulative runtime consumption in the service. - - :ivar used_seconds: Cumulative runtime of the indexer from the beginningTime to endingTime, in - seconds. Required. - :vartype used_seconds: int - :ivar remaining_seconds: Cumulative runtime remaining for all indexers in the service from the - beginningTime to endingTime, in seconds. - :vartype remaining_seconds: int - :ivar beginning_time: Beginning UTC time of the 24-hour period considered for indexer runtime - usage (inclusive). Required. - :vartype beginning_time: ~datetime.datetime - :ivar ending_time: End UTC time of the 24-hour period considered for indexer runtime usage - (inclusive). Required. - :vartype ending_time: ~datetime.datetime - """ - - used_seconds: int = rest_field(name="usedSeconds", visibility=["read", "create", "update", "delete", "query"]) - """Cumulative runtime of the indexer from the beginningTime to endingTime, in seconds. Required.""" - remaining_seconds: Optional[int] = rest_field( - name="remainingSeconds", visibility=["read", "create", "update", "delete", "query"] - ) - """Cumulative runtime remaining for all indexers in the service from the beginningTime to - endingTime, in seconds.""" - beginning_time: datetime.datetime = rest_field( - name="beginningTime", visibility=["read", "create", "update", "delete", "query"], format="rfc3339" - ) - """Beginning UTC time of the 24-hour period considered for indexer runtime usage (inclusive). 
- Required.""" - ending_time: datetime.datetime = rest_field( - name="endingTime", visibility=["read", "create", "update", "delete", "query"], format="rfc3339" - ) - """End UTC time of the 24-hour period considered for indexer runtime usage (inclusive). Required.""" - - @overload - def __init__( - self, - *, - used_seconds: int, - beginning_time: datetime.datetime, - ending_time: datetime.datetime, - remaining_seconds: Optional[int] = None, - ) -> None: ... - - @overload - def __init__(self, mapping: Mapping[str, Any]) -> None: - """ - :param mapping: raw JSON to initialize the model. - :type mapping: Mapping[str, Any] - """ - - def __init__(self, *args: Any, **kwargs: Any) -> None: - super().__init__(*args, **kwargs) - - class IndexingParameters(_Model): """Represents parameters for indexer execution. @@ -5179,67 +4636,25 @@ def __init__(self, *args: Any, **kwargs: Any) -> None: class IndexingSchedule(_Model): """Represents a schedule for indexer execution. - :ivar interval: The interval of time between indexer executions. Required. - :vartype interval: ~datetime.timedelta - :ivar start_time: The time when an indexer should start running. - :vartype start_time: ~datetime.datetime - """ - - interval: datetime.timedelta = rest_field(visibility=["read", "create", "update", "delete", "query"]) - """The interval of time between indexer executions. Required.""" - start_time: Optional[datetime.datetime] = rest_field( - name="startTime", visibility=["read", "create", "update", "delete", "query"], format="rfc3339" - ) - """The time when an indexer should start running.""" - - @overload - def __init__( - self, - *, - interval: datetime.timedelta, - start_time: Optional[datetime.datetime] = None, - ) -> None: ... - - @overload - def __init__(self, mapping: Mapping[str, Any]) -> None: - """ - :param mapping: raw JSON to initialize the model. 
- :type mapping: Mapping[str, Any] - """ - - def __init__(self, *args: Any, **kwargs: Any) -> None: - super().__init__(*args, **kwargs) - - -class IndexStatisticsSummary(_Model): - """Statistics for a given index. Statistics are collected periodically and are not guaranteed to - always be up-to-date. - - :ivar name: The name of the index. Required. - :vartype name: str - :ivar document_count: The number of documents in the index. Required. - :vartype document_count: int - :ivar storage_size: The amount of storage in bytes consumed by the index. Required. - :vartype storage_size: int - :ivar vector_index_size: The amount of memory in bytes consumed by vectors in the index. - Required. - :vartype vector_index_size: int + :ivar interval: The interval of time between indexer executions. Required. + :vartype interval: ~datetime.timedelta + :ivar start_time: The time when an indexer should start running. + :vartype start_time: ~datetime.datetime """ - name: str = rest_field(visibility=["read", "create", "update", "delete", "query"]) - """The name of the index. Required.""" - document_count: int = rest_field(name="documentCount", visibility=["read"]) - """The number of documents in the index. Required.""" - storage_size: int = rest_field(name="storageSize", visibility=["read"]) - """The amount of storage in bytes consumed by the index. Required.""" - vector_index_size: int = rest_field(name="vectorIndexSize", visibility=["read"]) - """The amount of memory in bytes consumed by vectors in the index. Required.""" + interval: datetime.timedelta = rest_field(visibility=["read", "create", "update", "delete", "query"]) + """The interval of time between indexer executions. 
Required.""" + start_time: Optional[datetime.datetime] = rest_field( + name="startTime", visibility=["read", "create", "update", "delete", "query"], format="rfc3339" + ) + """The time when an indexer should start running.""" @overload def __init__( self, *, - name: str, + interval: datetime.timedelta, + start_time: Optional[datetime.datetime] = None, ) -> None: ... @overload @@ -5577,25 +4992,12 @@ class KnowledgeBase(_Model): list[~azure.search.documents.indexes.models.KnowledgeSourceReference] :ivar models: Contains configuration options on how to connect to AI models. :vartype models: list[~azure.search.documents.indexes.models.KnowledgeBaseModel] - :ivar retrieval_reasoning_effort: The retrieval reasoning effort configuration. - :vartype retrieval_reasoning_effort: - ~azure.search.documents.knowledgebases.models.KnowledgeRetrievalReasoningEffort - :ivar output_mode: The output mode for the knowledge base. Known values are: "extractiveData" - and "answerSynthesis". - :vartype output_mode: str or - ~azure.search.documents.knowledgebases.models.KnowledgeRetrievalOutputMode :ivar e_tag: The ETag of the knowledge base. :vartype e_tag: str :ivar encryption_key: A description of an encryption key that you create in Azure Key Vault. :vartype encryption_key: ~azure.search.documents.indexes.models.SearchResourceEncryptionKey :ivar description: The description of the knowledge base. :vartype description: str - :ivar retrieval_instructions: Instructions considered by the knowledge base when developing - query plan. - :vartype retrieval_instructions: str - :ivar answer_instructions: Instructions considered by the knowledge base when generating - answers. 
- :vartype answer_instructions: str """ name: str = rest_field(visibility=["read", "create", "update", "delete", "query"]) @@ -5608,15 +5010,6 @@ class KnowledgeBase(_Model): visibility=["read", "create", "update", "delete", "query"] ) """Contains configuration options on how to connect to AI models.""" - retrieval_reasoning_effort: Optional["_knowledgebases_models3.KnowledgeRetrievalReasoningEffort"] = rest_field( - name="retrievalReasoningEffort", visibility=["read", "create", "update", "delete", "query"] - ) - """The retrieval reasoning effort configuration.""" - output_mode: Optional[Union[str, "_knowledgebases_models3.KnowledgeRetrievalOutputMode"]] = rest_field( - name="outputMode", visibility=["read", "create", "update", "delete", "query"] - ) - """The output mode for the knowledge base. Known values are: \"extractiveData\" and - \"answerSynthesis\".""" e_tag: Optional[str] = rest_field(name="@odata.etag", visibility=["read", "create", "update", "delete", "query"]) """The ETag of the knowledge base.""" encryption_key: Optional["_models.SearchResourceEncryptionKey"] = rest_field( @@ -5625,14 +5018,6 @@ class KnowledgeBase(_Model): """A description of an encryption key that you create in Azure Key Vault.""" description: Optional[str] = rest_field(visibility=["read", "create", "update", "delete", "query"]) """The description of the knowledge base.""" - retrieval_instructions: Optional[str] = rest_field( - name="retrievalInstructions", visibility=["read", "create", "update", "delete", "query"] - ) - """Instructions considered by the knowledge base when developing query plan.""" - answer_instructions: Optional[str] = rest_field( - name="answerInstructions", visibility=["read", "create", "update", "delete", "query"] - ) - """Instructions considered by the knowledge base when generating answers.""" @overload def __init__( @@ -5641,13 +5026,9 @@ def __init__( name: str, knowledge_sources: list["_models.KnowledgeSourceReference"], models: 
Optional[list["_models.KnowledgeBaseModel"]] = None, - retrieval_reasoning_effort: Optional["_knowledgebases_models3.KnowledgeRetrievalReasoningEffort"] = None, - output_mode: Optional[Union[str, "_knowledgebases_models3.KnowledgeRetrievalOutputMode"]] = None, e_tag: Optional[str] = None, encryption_key: Optional["_models.SearchResourceEncryptionKey"] = None, description: Optional[str] = None, - retrieval_instructions: Optional[str] = None, - answer_instructions: Optional[str] = None, ) -> None: ... @overload @@ -7315,112 +6696,6 @@ def __init__(self, *args: Any, **kwargs: Any) -> None: self.odata_type = "#Microsoft.Skills.Text.PIIDetectionSkill" # type: ignore -class RemoteSharePointKnowledgeSource(KnowledgeSource, discriminator="remoteSharePoint"): - """Configuration for remote SharePoint knowledge source. - - :ivar name: The name of the knowledge source. Required. - :vartype name: str - :ivar description: Optional user-defined description. - :vartype description: str - :ivar e_tag: The ETag of the knowledge source. - :vartype e_tag: str - :ivar encryption_key: A description of an encryption key that you create in Azure Key Vault. - This key is used to provide an additional level of encryption-at-rest for your knowledge source - definition when you want full assurance that no one, not even Microsoft, can decrypt them. Once - you have encrypted your knowledge source definition, it will always remain encrypted. The - search service will ignore attempts to set this property to null. You can change this property - as needed if you want to rotate your encryption key; Your knowledge source definition will be - unaffected. Encryption with customer-managed keys is not available for free search services, - and is only available for paid services created on or after January 1, 2019. - :vartype encryption_key: ~azure.search.documents.indexes.models.SearchResourceEncryptionKey - :ivar kind: Required. A knowledge source that reads data from remote SharePoint. 
- :vartype kind: str or ~azure.search.documents.indexes.models.REMOTE_SHARE_POINT - :ivar remote_share_point_parameters: The parameters for the remote SharePoint knowledge source. - :vartype remote_share_point_parameters: - ~azure.search.documents.indexes.models.RemoteSharePointKnowledgeSourceParameters - """ - - kind: Literal[KnowledgeSourceKind.REMOTE_SHARE_POINT] = rest_discriminator(name="kind", visibility=["read", "create", "update", "delete", "query"]) # type: ignore - """Required. A knowledge source that reads data from remote SharePoint.""" - remote_share_point_parameters: Optional["_models.RemoteSharePointKnowledgeSourceParameters"] = rest_field( - name="remoteSharePointParameters", visibility=["read", "create", "update", "delete", "query"] - ) - """The parameters for the remote SharePoint knowledge source.""" - - @overload - def __init__( - self, - *, - name: str, - description: Optional[str] = None, - e_tag: Optional[str] = None, - encryption_key: Optional["_models.SearchResourceEncryptionKey"] = None, - remote_share_point_parameters: Optional["_models.RemoteSharePointKnowledgeSourceParameters"] = None, - ) -> None: ... - - @overload - def __init__(self, mapping: Mapping[str, Any]) -> None: - """ - :param mapping: raw JSON to initialize the model. - :type mapping: Mapping[str, Any] - """ - - def __init__(self, *args: Any, **kwargs: Any) -> None: - super().__init__(*args, **kwargs) - self.kind = KnowledgeSourceKind.REMOTE_SHARE_POINT # type: ignore - - -class RemoteSharePointKnowledgeSourceParameters(_Model): # pylint: disable=name-too-long - """Parameters for remote SharePoint knowledge source. - - :ivar filter_expression: Keyword Query Language (KQL) expression with queryable SharePoint - properties and attributes to scope the retrieval before the query runs. - :vartype filter_expression: str - :ivar resource_metadata: A list of metadata fields to be returned for each item in the - response. 
Only retrievable metadata properties can be included in this list. By default, no - metadata is returned. - :vartype resource_metadata: list[str] - :ivar container_type_id: Container ID for SharePoint Embedded connection. When this is null, it - will use SharePoint Online. - :vartype container_type_id: str - """ - - filter_expression: Optional[str] = rest_field( - name="filterExpression", visibility=["read", "create", "update", "delete", "query"] - ) - """Keyword Query Language (KQL) expression with queryable SharePoint properties and attributes to - scope the retrieval before the query runs.""" - resource_metadata: Optional[list[str]] = rest_field( - name="resourceMetadata", visibility=["read", "create", "update", "delete", "query"] - ) - """A list of metadata fields to be returned for each item in the response. Only retrievable - metadata properties can be included in this list. By default, no metadata is returned.""" - container_type_id: Optional[str] = rest_field( - name="containerTypeId", visibility=["read", "create", "update", "delete", "query"] - ) - """Container ID for SharePoint Embedded connection. When this is null, it will use SharePoint - Online.""" - - @overload - def __init__( - self, - *, - filter_expression: Optional[str] = None, - resource_metadata: Optional[list[str]] = None, - container_type_id: Optional[str] = None, - ) -> None: ... - - @overload - def __init__(self, mapping: Mapping[str, Any]) -> None: - """ - :param mapping: raw JSON to initialize the model. - :type mapping: Mapping[str, Any] - """ - - def __init__(self, *args: Any, **kwargs: Any) -> None: - super().__init__(*args, **kwargs) - - class RescoringOptions(_Model): """Contains the options for rescoring. @@ -7765,12 +7040,6 @@ class SearchField(_Model): Collection(Edm.GeographyPoint) cannot be facetable. Default is true for all other simple fields. :vartype facetable: bool - :ivar permission_filter: A value indicating whether the field should be used as a permission - filter. 
Known values are: "userIds", "groupIds", and "rbacScope". - :vartype permission_filter: str or ~azure.search.documents.indexes.models.PermissionFilter - :ivar sensitivity_label: A value indicating whether the field contains sensitivity label - information. - :vartype sensitivity_label: bool :ivar analyzer_name: The name of the analyzer to use for the field. This option can be used only with searchable fields and it can't be set together with either searchAnalyzer or indexAnalyzer. Once the analyzer is chosen, it cannot be changed for the field. Must be null @@ -7929,15 +7198,6 @@ class SearchField(_Model): property must be null for complex fields. Fields of type Edm.GeographyPoint or Collection(Edm.GeographyPoint) cannot be facetable. Default is true for all other simple fields.""" - permission_filter: Optional[Union[str, "_models.PermissionFilter"]] = rest_field( - name="permissionFilter", visibility=["read", "create", "update", "delete", "query"] - ) - """A value indicating whether the field should be used as a permission filter. 
Known values are: - \"userIds\", \"groupIds\", and \"rbacScope\".""" - sensitivity_label: Optional[bool] = rest_field( - name="sensitivityLabel", visibility=["read", "create", "update", "delete", "query"] - ) - """A value indicating whether the field contains sensitivity label information.""" analyzer_name: Optional[Union[str, "_models.LexicalAnalyzerName"]] = rest_field( name="analyzer", visibility=["read", "create", "update", "delete", "query"] ) @@ -8064,8 +7324,6 @@ def __init__( filterable: Optional[bool] = None, sortable: Optional[bool] = None, facetable: Optional[bool] = None, - permission_filter: Optional[Union[str, "_models.PermissionFilter"]] = None, - sensitivity_label: Optional[bool] = None, analyzer_name: Optional[Union[str, "_models.LexicalAnalyzerName"]] = None, search_analyzer_name: Optional[Union[str, "_models.LexicalAnalyzerName"]] = None, index_analyzer_name: Optional[Union[str, "_models.LexicalAnalyzerName"]] = None, @@ -8137,12 +7395,6 @@ class SearchIndex(_Model): :vartype semantic_search: ~azure.search.documents.indexes.models.SemanticSearch :ivar vector_search: Contains configuration options related to vector search. :vartype vector_search: ~azure.search.documents.indexes.models.VectorSearch - :ivar permission_filter_option: A value indicating whether permission filtering is enabled for - the index. Known values are: "enabled" and "disabled". - :vartype permission_filter_option: str or - ~azure.search.documents.indexes.models.SearchIndexPermissionFilterOption - :ivar purview_enabled: A value indicating whether Purview is enabled for the index. - :vartype purview_enabled: bool :ivar e_tag: The ETag of the index. 
:vartype e_tag: str """ @@ -8215,15 +7467,6 @@ class SearchIndex(_Model): name="vectorSearch", visibility=["read", "create", "update", "delete", "query"] ) """Contains configuration options related to vector search.""" - permission_filter_option: Optional[Union[str, "_models.SearchIndexPermissionFilterOption"]] = rest_field( - name="permissionFilterOption", visibility=["read", "create", "update", "delete", "query"] - ) - """A value indicating whether permission filtering is enabled for the index. Known values are: - \"enabled\" and \"disabled\".""" - purview_enabled: Optional[bool] = rest_field( - name="purviewEnabled", visibility=["read", "create", "update", "delete", "query"] - ) - """A value indicating whether Purview is enabled for the index.""" e_tag: Optional[str] = rest_field(name="@odata.etag", visibility=["read", "create", "update", "delete", "query"]) """The ETag of the index.""" @@ -8247,8 +7490,6 @@ def __init__( similarity: Optional["_models.SimilarityAlgorithm"] = None, semantic_search: Optional["_models.SemanticSearch"] = None, vector_search: Optional["_models.VectorSearch"] = None, - permission_filter_option: Optional[Union[str, "_models.SearchIndexPermissionFilterOption"]] = None, - purview_enabled: Optional[bool] = None, e_tag: Optional[str] = None, ) -> None: ... @@ -8301,9 +7542,6 @@ class SearchIndexer(_Model): keys is not available for free search services, and is only available for paid services created on or after January 1, 2019. :vartype encryption_key: ~azure.search.documents.indexes.models.SearchResourceEncryptionKey - :ivar cache: Adds caching to an enrichment pipeline to allow for incremental modification steps - without having to rebuild the index every time. - :vartype cache: ~azure.search.documents.indexes.models.SearchIndexerCache """ name: str = rest_field(visibility=["read", "create", "update", "delete", "query"]) @@ -8357,11 +7595,6 @@ class SearchIndexer(_Model): indexer execution status) will be unaffected. 
Encryption with customer-managed keys is not available for free search services, and is only available for paid services created on or after January 1, 2019.""" - cache: Optional["_models.SearchIndexerCache"] = rest_field( - visibility=["read", "create", "update", "delete", "query"] - ) - """Adds caching to an enrichment pipeline to allow for incremental modification steps without - having to rebuild the index every time.""" @overload def __init__( @@ -8379,65 +7612,6 @@ def __init__( is_disabled: Optional[bool] = None, e_tag: Optional[str] = None, encryption_key: Optional["_models.SearchResourceEncryptionKey"] = None, - cache: Optional["_models.SearchIndexerCache"] = None, - ) -> None: ... - - @overload - def __init__(self, mapping: Mapping[str, Any]) -> None: - """ - :param mapping: raw JSON to initialize the model. - :type mapping: Mapping[str, Any] - """ - - def __init__(self, *args: Any, **kwargs: Any) -> None: - super().__init__(*args, **kwargs) - - -class SearchIndexerCache(_Model): - """The type of the cache. - - :ivar id: A guid for the SearchIndexerCache. - :vartype id: str - :ivar storage_connection_string: The connection string to the storage account where the cache - data will be persisted. - :vartype storage_connection_string: str - :ivar enable_reprocessing: Specifies whether incremental reprocessing is enabled. - :vartype enable_reprocessing: bool - :ivar identity: The user-assigned managed identity used for connections to the enrichment - cache. If the connection string indicates an identity (ResourceId) and it's not specified, the - system-assigned managed identity is used. On updates to the indexer, if the identity is - unspecified, the value remains unchanged. If set to "none", the value of this property is - cleared. 
- :vartype identity: ~azure.search.documents.indexes.models.SearchIndexerDataIdentity - """ - - id: Optional[str] = rest_field(visibility=["read", "create", "update", "delete", "query"]) - """A guid for the SearchIndexerCache.""" - storage_connection_string: Optional[str] = rest_field( - name="storageConnectionString", visibility=["read", "create", "update", "delete", "query"] - ) - """The connection string to the storage account where the cache data will be persisted.""" - enable_reprocessing: Optional[bool] = rest_field( - name="enableReprocessing", visibility=["read", "create", "update", "delete", "query"] - ) - """Specifies whether incremental reprocessing is enabled.""" - identity: Optional["_models.SearchIndexerDataIdentity"] = rest_field( - visibility=["read", "create", "update", "delete", "query"] - ) - """The user-assigned managed identity used for connections to the enrichment cache. If the - connection string indicates an identity (ResourceId) and it's not specified, the - system-assigned managed identity is used. On updates to the indexer, if the identity is - unspecified, the value remains unchanged. If set to \"none\", the value of this property is - cleared.""" - - @overload - def __init__( - self, - *, - id: Optional[str] = None, # pylint: disable=redefined-builtin - storage_connection_string: Optional[str] = None, - enable_reprocessing: Optional[bool] = None, - identity: Optional["_models.SearchIndexerDataIdentity"] = None, ) -> None: ... @overload @@ -8563,9 +7737,6 @@ class SearchIndexerDataSourceConnection(_Model): :ivar type: The type of the datasource. Required. Known values are: "azuresql", "cosmosdb", "azureblob", "azuretable", "mysql", "adlsgen2", "onelake", and "sharepoint". :vartype type: str or ~azure.search.documents.indexes.models.SearchIndexerDataSourceType - :ivar sub_type: A specific type of the data source, in case the resource is capable of - different modalities. For example, 'MongoDb' for certain 'cosmosDb' accounts. 
- :vartype sub_type: str :ivar credentials: Credentials for the datasource. Required. :vartype credentials: ~azure.search.documents.indexes.models.DataSourceCredentials :ivar container: The data container for the datasource. Required. @@ -8575,9 +7746,6 @@ class SearchIndexerDataSourceConnection(_Model): not specified, the value remains unchanged. If "none" is specified, the value of this property is cleared. :vartype identity: ~azure.search.documents.indexes.models.SearchIndexerDataIdentity - :ivar indexer_permission_options: Ingestion options with various types of permission data. - :vartype indexer_permission_options: list[str or - ~azure.search.documents.indexes.models.IndexerPermissionOption] :ivar data_change_detection_policy: The data change detection policy for the datasource. :vartype data_change_detection_policy: ~azure.search.documents.indexes.models.DataChangeDetectionPolicy @@ -8606,9 +7774,6 @@ class SearchIndexerDataSourceConnection(_Model): ) """The type of the datasource. Required. Known values are: \"azuresql\", \"cosmosdb\", \"azureblob\", \"azuretable\", \"mysql\", \"adlsgen2\", \"onelake\", and \"sharepoint\".""" - sub_type: Optional[str] = rest_field(name="subType", visibility=["read"]) - """A specific type of the data source, in case the resource is capable of different modalities. - For example, 'MongoDb' for certain 'cosmosDb' accounts.""" credentials: "_models.DataSourceCredentials" = rest_field( visibility=["read", "create", "update", "delete", "query"] ) @@ -8623,10 +7788,6 @@ class SearchIndexerDataSourceConnection(_Model): """An explicit managed identity to use for this datasource. If not specified and the connection string is a managed identity, the system-assigned managed identity is used. If not specified, the value remains unchanged. 
If \"none\" is specified, the value of this property is cleared.""" - indexer_permission_options: Optional[list[Union[str, "_models.IndexerPermissionOption"]]] = rest_field( - name="indexerPermissionOptions", visibility=["read", "create", "update", "delete", "query"] - ) - """Ingestion options with various types of permission data.""" data_change_detection_policy: Optional["_models.DataChangeDetectionPolicy"] = rest_field( name="dataChangeDetectionPolicy", visibility=["read", "create", "update", "delete", "query"] ) @@ -8661,7 +7822,6 @@ def __init__( container: "_models.SearchIndexerDataContainer", description: Optional[str] = None, identity: Optional["_models.SearchIndexerDataIdentity"] = None, - indexer_permission_options: Optional[list[Union[str, "_models.IndexerPermissionOption"]]] = None, data_change_detection_policy: Optional["_models.DataChangeDetectionPolicy"] = None, data_deletion_detection_policy: Optional["_models.DataDeletionDetectionPolicy"] = None, e_tag: Optional[str] = None, @@ -8930,10 +8090,6 @@ class SearchIndexerKnowledgeStore(_Model): to the indexer, if the identity is unspecified, the value remains unchanged. If set to "none", the value of this property is cleared. :vartype identity: ~azure.search.documents.indexes.models.SearchIndexerDataIdentity - :ivar parameters: A dictionary of knowledge store-specific configuration properties. Each name - is the name of a specific property. Each value must be of a primitive type. - :vartype parameters: - ~azure.search.documents.indexes.models.SearchIndexerKnowledgeStoreParameters """ storage_connection_string: str = rest_field( @@ -8952,11 +8108,6 @@ class SearchIndexerKnowledgeStore(_Model): specified, the system-assigned managed identity is used. On updates to the indexer, if the identity is unspecified, the value remains unchanged. 
If set to \"none\", the value of this property is cleared.""" - parameters: Optional["_models.SearchIndexerKnowledgeStoreParameters"] = rest_field( - visibility=["read", "create", "update", "delete", "query"] - ) - """A dictionary of knowledge store-specific configuration properties. Each name is the name of a - specific property. Each value must be of a primitive type.""" @overload def __init__( @@ -8965,7 +8116,6 @@ def __init__( storage_connection_string: str, projections: list["_models.SearchIndexerKnowledgeStoreProjection"], identity: Optional["_models.SearchIndexerDataIdentity"] = None, - parameters: Optional["_models.SearchIndexerKnowledgeStoreParameters"] = None, ) -> None: ... @overload @@ -9166,38 +8316,6 @@ def __init__(self, *args: Any, **kwargs: Any) -> None: super().__init__(*args, **kwargs) -class SearchIndexerKnowledgeStoreParameters(_Model): - """A dictionary of knowledge store-specific configuration properties. Each name is the name of a - specific property. Each value must be of a primitive type. - - :ivar synthesize_generated_key_name: Whether or not projections should synthesize a generated - key name if one isn't already present. - :vartype synthesize_generated_key_name: bool - """ - - synthesize_generated_key_name: Optional[bool] = rest_field( - name="synthesizeGeneratedKeyName", visibility=["read", "create", "update", "delete", "query"] - ) - """Whether or not projections should synthesize a generated key name if one isn't already present.""" - - @overload - def __init__( - self, - *, - synthesize_generated_key_name: Optional[bool] = None, - ) -> None: ... - - @overload - def __init__(self, mapping: Mapping[str, Any]) -> None: - """ - :param mapping: raw JSON to initialize the model. - :type mapping: Mapping[str, Any] - """ - - def __init__(self, *args: Any, **kwargs: Any) -> None: - super().__init__(*args, **kwargs) - - class SearchIndexerKnowledgeStoreProjection(_Model): """Container object for various projection selectors. 
@@ -9414,9 +8532,6 @@ class SearchIndexerStatus(_Model): :ivar status: Overall indexer status. Required. Known values are: "unknown", "error", and "running". :vartype status: str or ~azure.search.documents.indexes.models.IndexerStatus - :ivar runtime: Snapshot of the indexer's cumulative runtime consumption for the service over - the current UTC period. Required. - :vartype runtime: ~azure.search.documents.indexes.models.IndexerRuntime :ivar last_result: The result of the most recent or an in-progress indexer execution. :vartype last_result: ~azure.search.documents.indexes.models.IndexerExecutionResult :ivar execution_history: History of the recent indexer executions, sorted in reverse @@ -9424,26 +8539,18 @@ class SearchIndexerStatus(_Model): :vartype execution_history: list[~azure.search.documents.indexes.models.IndexerExecutionResult] :ivar limits: The execution limits for the indexer. Required. :vartype limits: ~azure.search.documents.indexes.models.SearchIndexerLimits - :ivar current_state: All of the state that defines and dictates the indexer's current - execution. - :vartype current_state: ~azure.search.documents.indexes.models.IndexerCurrentState """ name: str = rest_field(visibility=["read"]) """The name of the indexer. Required.""" status: Union[str, "_models.IndexerStatus"] = rest_field(visibility=["read"]) """Overall indexer status. Required. Known values are: \"unknown\", \"error\", and \"running\".""" - runtime: "_models.IndexerRuntime" = rest_field(visibility=["read"]) - """Snapshot of the indexer's cumulative runtime consumption for the service over the current UTC - period. 
Required.""" last_result: Optional["_models.IndexerExecutionResult"] = rest_field(name="lastResult", visibility=["read"]) """The result of the most recent or an in-progress indexer execution.""" execution_history: list["_models.IndexerExecutionResult"] = rest_field(name="executionHistory", visibility=["read"]) """History of the recent indexer executions, sorted in reverse chronological order. Required.""" limits: "_models.SearchIndexerLimits" = rest_field(visibility=["read"]) """The execution limits for the indexer. Required.""" - current_state: Optional["_models.IndexerCurrentState"] = rest_field(name="currentState", visibility=["read"]) - """All of the state that defines and dictates the indexer's current execution.""" class SearchIndexerWarning(_Model): @@ -9665,12 +8772,6 @@ class SearchIndexResponse(_Model): :vartype semantic: ~azure.search.documents.indexes.models.SemanticSearch :ivar vector_search: Contains configuration options related to vector search. :vartype vector_search: ~azure.search.documents.indexes.models.VectorSearch - :ivar permission_filter_option: A value indicating whether permission filtering is enabled for - the index. Known values are: "enabled" and "disabled". - :vartype permission_filter_option: str or - ~azure.search.documents.indexes.models.SearchIndexPermissionFilterOption - :ivar purview_enabled: A value indicating whether Purview is enabled for the index. - :vartype purview_enabled: bool :ivar e_tag: The ETag of the index. :vartype e_tag: str """ @@ -9745,15 +8846,6 @@ class SearchIndexResponse(_Model): name="vectorSearch", visibility=["read", "create", "update", "delete", "query"] ) """Contains configuration options related to vector search.""" - permission_filter_option: Optional[Union[str, "_models.SearchIndexPermissionFilterOption"]] = rest_field( - name="permissionFilterOption", visibility=["read", "create", "update", "delete", "query"] - ) - """A value indicating whether permission filtering is enabled for the index. 
Known values are: - \"enabled\" and \"disabled\".""" - purview_enabled: Optional[bool] = rest_field( - name="purviewEnabled", visibility=["read", "create", "update", "delete", "query"] - ) - """A value indicating whether Purview is enabled for the index.""" e_tag: Optional[str] = rest_field(name="@odata.etag", visibility=["read", "create", "update", "delete", "query"]) """The ETag of the index.""" @@ -9777,8 +8869,6 @@ def __init__( similarity: Optional["_models.SimilarityAlgorithm"] = None, semantic: Optional["_models.SemanticSearch"] = None, vector_search: Optional["_models.VectorSearch"] = None, - permission_filter_option: Optional[Union[str, "_models.SearchIndexPermissionFilterOption"]] = None, - purview_enabled: Optional[bool] = None, e_tag: Optional[str] = None, ) -> None: ... @@ -10048,18 +9138,12 @@ class SearchServiceStatistics(_Model): :vartype counters: ~azure.search.documents.indexes.models.SearchServiceCounters :ivar limits: Service level general limits. Required. :vartype limits: ~azure.search.documents.indexes.models.SearchServiceLimits - :ivar indexers_runtime: Service level indexer runtime consumption. Required. - :vartype indexers_runtime: ~azure.search.documents.indexes.models.ServiceIndexersRuntime """ counters: "_models.SearchServiceCounters" = rest_field(visibility=["read", "create", "update", "delete", "query"]) """Service level resource counters. Required.""" limits: "_models.SearchServiceLimits" = rest_field(visibility=["read", "create", "update", "delete", "query"]) """Service level general limits. Required.""" - indexers_runtime: "_models.ServiceIndexersRuntime" = rest_field( - name="indexersRuntime", visibility=["read", "create", "update", "delete", "query"] - ) - """Service level indexer runtime consumption. Required.""" @overload def __init__( @@ -10067,7 +9151,6 @@ def __init__( *, counters: "_models.SearchServiceCounters", limits: "_models.SearchServiceLimits", - indexers_runtime: "_models.ServiceIndexersRuntime", ) -> None: ... 
@overload @@ -10139,9 +9222,6 @@ class SemanticConfiguration(_Model): :ivar ranking_order: Specifies the score type to be used for the sort order of the search results. Known values are: "BoostedRerankerScore" and "RerankerScore". :vartype ranking_order: str or ~azure.search.documents.indexes.models.RankingOrder - :ivar flighting_opt_in: Determines which semantic or query rewrite models to use during model - flighting/upgrades. - :vartype flighting_opt_in: bool """ name: str = rest_field(visibility=["read", "create", "update", "delete", "query"]) @@ -10157,10 +9237,6 @@ class SemanticConfiguration(_Model): ) """Specifies the score type to be used for the sort order of the search results. Known values are: \"BoostedRerankerScore\" and \"RerankerScore\".""" - flighting_opt_in: Optional[bool] = rest_field( - name="flightingOptIn", visibility=["read", "create", "update", "delete", "query"] - ) - """Determines which semantic or query rewrite models to use during model flighting/upgrades.""" @overload def __init__( @@ -10169,7 +9245,6 @@ def __init__( name: str, prioritized_fields: "_models.SemanticPrioritizedFields", ranking_order: Optional[Union[str, "_models.RankingOrder"]] = None, - flighting_opt_in: Optional[bool] = None, ) -> None: ... @overload @@ -10331,7 +9406,10 @@ class SentimentSkillV3(SearchIndexerSkill, discriminator="#Microsoft.Skills.Text be consumed as an input by another skill. Required. :vartype outputs: list[~azure.search.documents.indexes.models.OutputFieldMappingEntry] :ivar default_language_code: A value indicating which language code to use. Default is ``en``. - :vartype default_language_code: str + Known values are: "da", "nl", "en", "fi", "fr", "de", "el", "it", "no", "pl", "pt-PT", "ru", + "es", "sv", and "tr". 
+ :vartype default_language_code: str or + ~azure.search.documents.indexes.models.SentimentSkillLanguage :ivar include_opinion_mining: If set to true, the skill output will include information from Text Analytics for opinion mining, namely targets (nouns or verbs) and their associated assessment (adjective) in the text. Default is false. @@ -10345,10 +9423,12 @@ class SentimentSkillV3(SearchIndexerSkill, discriminator="#Microsoft.Skills.Text :vartype odata_type: str """ - default_language_code: Optional[str] = rest_field( + default_language_code: Optional[Union[str, "_models.SentimentSkillLanguage"]] = rest_field( name="defaultLanguageCode", visibility=["read", "create", "update", "delete", "query"] ) - """A value indicating which language code to use. Default is ``en``.""" + """A value indicating which language code to use. Default is ``en``. Known values are: \"da\", + \"nl\", \"en\", \"fi\", \"fr\", \"de\", \"el\", \"it\", \"no\", \"pl\", \"pt-PT\", \"ru\", + \"es\", \"sv\", and \"tr\".""" include_opinion_mining: Optional[bool] = rest_field( name="includeOpinionMining", visibility=["read", "create", "update", "delete", "query"] ) @@ -10374,7 +9454,7 @@ def __init__( name: Optional[str] = None, description: Optional[str] = None, context: Optional[str] = None, - default_language_code: Optional[str] = None, + default_language_code: Optional[Union[str, "_models.SentimentSkillLanguage"]] = None, include_opinion_mining: Optional[bool] = None, model_version: Optional[str] = None, ) -> None: ... @@ -10391,62 +9471,6 @@ def __init__(self, *args: Any, **kwargs: Any) -> None: self.odata_type = "#Microsoft.Skills.Text.V3.SentimentSkill" # type: ignore -class ServiceIndexersRuntime(_Model): - """Represents service-level indexer runtime counters. - - :ivar used_seconds: Cumulative runtime of all indexers in the service from the beginningTime to - endingTime, in seconds. Required. 
- :vartype used_seconds: int - :ivar remaining_seconds: Cumulative runtime remaining for all indexers in the service from the - beginningTime to endingTime, in seconds. - :vartype remaining_seconds: int - :ivar beginning_time: Beginning UTC time of the 24-hour period considered for indexer runtime - usage (inclusive). Required. - :vartype beginning_time: ~datetime.datetime - :ivar ending_time: End UTC time of the 24-hour period considered for indexer runtime usage - (inclusive). Required. - :vartype ending_time: ~datetime.datetime - """ - - used_seconds: int = rest_field(name="usedSeconds", visibility=["read", "create", "update", "delete", "query"]) - """Cumulative runtime of all indexers in the service from the beginningTime to endingTime, in - seconds. Required.""" - remaining_seconds: Optional[int] = rest_field( - name="remainingSeconds", visibility=["read", "create", "update", "delete", "query"] - ) - """Cumulative runtime remaining for all indexers in the service from the beginningTime to - endingTime, in seconds.""" - beginning_time: datetime.datetime = rest_field( - name="beginningTime", visibility=["read", "create", "update", "delete", "query"], format="rfc3339" - ) - """Beginning UTC time of the 24-hour period considered for indexer runtime usage (inclusive). - Required.""" - ending_time: datetime.datetime = rest_field( - name="endingTime", visibility=["read", "create", "update", "delete", "query"], format="rfc3339" - ) - """End UTC time of the 24-hour period considered for indexer runtime usage (inclusive). Required.""" - - @overload - def __init__( - self, - *, - used_seconds: int, - beginning_time: datetime.datetime, - ending_time: datetime.datetime, - remaining_seconds: Optional[int] = None, - ) -> None: ... - - @overload - def __init__(self, mapping: Mapping[str, Any]) -> None: - """ - :param mapping: raw JSON to initialize the model. 
- :type mapping: Mapping[str, Any] - """ - - def __init__(self, *args: Any, **kwargs: Any) -> None: - super().__init__(*args, **kwargs) - - class ShaperSkill(SearchIndexerSkill, discriminator="#Microsoft.Skills.Util.ShaperSkill"): """A skill for reshaping the outputs. It creates a complex type to support composite fields (also known as multipart fields). @@ -10753,16 +9777,6 @@ class SplitSkill(SearchIndexerSkill, discriminator="#Microsoft.Skills.Text.Split 'maximumPagesToTake' pages, in order to improve performance when only a few initial pages are needed from each document. :vartype maximum_pages_to_take: int - :ivar unit: Only applies if textSplitMode is set to pages. There are two possible values. The - choice of the values will decide the length (maximumPageLength and pageOverlapLength) - measurement. The default is 'characters', which means the length will be measured by character. - Known values are: "characters" and "azureOpenAITokens". - :vartype unit: str or ~azure.search.documents.indexes.models.SplitSkillUnit - :ivar azure_open_ai_tokenizer_parameters: Only applies if the unit is set to azureOpenAITokens. - If specified, the splitSkill will use these parameters when performing the tokenization. The - parameters are a valid 'encoderModelName' and an optional 'allowedSpecialTokens' property. - :vartype azure_open_ai_tokenizer_parameters: - ~azure.search.documents.indexes.models.AzureOpenAITokenizerParameters :ivar odata_type: A URI fragment specifying the type of skill. Required. Default value is "#Microsoft.Skills.Text.SplitSkill". :vartype odata_type: str @@ -10794,19 +9808,6 @@ class SplitSkill(SearchIndexerSkill, discriminator="#Microsoft.Skills.Text.Split """Only applicable when textSplitMode is set to 'pages'. 
If specified, the SplitSkill will discontinue splitting after processing the first 'maximumPagesToTake' pages, in order to improve performance when only a few initial pages are needed from each document.""" - unit: Optional[Union[str, "_models.SplitSkillUnit"]] = rest_field( - visibility=["read", "create", "update", "delete", "query"] - ) - """Only applies if textSplitMode is set to pages. There are two possible values. The choice of the - values will decide the length (maximumPageLength and pageOverlapLength) measurement. The - default is 'characters', which means the length will be measured by character. Known values - are: \"characters\" and \"azureOpenAITokens\".""" - azure_open_ai_tokenizer_parameters: Optional["_models.AzureOpenAITokenizerParameters"] = rest_field( - name="azureOpenAITokenizerParameters", visibility=["read", "create", "update", "delete", "query"] - ) - """Only applies if the unit is set to azureOpenAITokens. If specified, the splitSkill will use - these parameters when performing the tokenization. The parameters are a valid - 'encoderModelName' and an optional 'allowedSpecialTokens' property.""" odata_type: Literal["#Microsoft.Skills.Text.SplitSkill"] = rest_discriminator(name="@odata.type", visibility=["read", "create", "update", "delete", "query"]) # type: ignore """A URI fragment specifying the type of skill. Required. Default value is \"#Microsoft.Skills.Text.SplitSkill\".""" @@ -10825,8 +9826,6 @@ def __init__( maximum_page_length: Optional[int] = None, page_overlap_length: Optional[int] = None, maximum_pages_to_take: Optional[int] = None, - unit: Optional[Union[str, "_models.SplitSkillUnit"]] = None, - azure_open_ai_tokenizer_parameters: Optional["_models.AzureOpenAITokenizerParameters"] = None, ) -> None: ... 
@overload @@ -11710,65 +10709,6 @@ def __init__(self, *args: Any, **kwargs: Any) -> None: super().__init__(*args, **kwargs) -class VisionVectorizeSkill(SearchIndexerSkill, discriminator="#Microsoft.Skills.Vision.VectorizeSkill"): - """Allows you to generate a vector embedding for a given image or text input using the Azure AI - Services Vision Vectorize API. - - :ivar name: The name of the skill which uniquely identifies it within the skillset. A skill - with no name defined will be given a default name of its 1-based index in the skills array, - prefixed with the character '#'. - :vartype name: str - :ivar description: The description of the skill which describes the inputs, outputs, and usage - of the skill. - :vartype description: str - :ivar context: Represents the level at which operations take place, such as the document root - or document content (for example, /document or /document/content). The default is /document. - :vartype context: str - :ivar inputs: Inputs of the skills could be a column in the source data set, or the output of - an upstream skill. Required. - :vartype inputs: list[~azure.search.documents.indexes.models.InputFieldMappingEntry] - :ivar outputs: The output of a skill is either a field in a search index, or a value that can - be consumed as an input by another skill. Required. - :vartype outputs: list[~azure.search.documents.indexes.models.OutputFieldMappingEntry] - :ivar model_version: The version of the model to use when calling the AI Services Vision - service. It will default to the latest available when not specified. Required. - :vartype model_version: str - :ivar odata_type: A URI fragment specifying the type of skill. Required. Default value is - "#Microsoft.Skills.Vision.VectorizeSkill". - :vartype odata_type: str - """ - - model_version: str = rest_field(name="modelVersion", visibility=["read", "create", "update", "delete", "query"]) - """The version of the model to use when calling the AI Services Vision service. 
It will default to - the latest available when not specified. Required.""" - odata_type: Literal["#Microsoft.Skills.Vision.VectorizeSkill"] = rest_discriminator(name="@odata.type", visibility=["read", "create", "update", "delete", "query"]) # type: ignore - """A URI fragment specifying the type of skill. Required. Default value is - \"#Microsoft.Skills.Vision.VectorizeSkill\".""" - - @overload - def __init__( - self, - *, - inputs: list["_models.InputFieldMappingEntry"], - outputs: list["_models.OutputFieldMappingEntry"], - model_version: str, - name: Optional[str] = None, - description: Optional[str] = None, - context: Optional[str] = None, - ) -> None: ... - - @overload - def __init__(self, mapping: Mapping[str, Any]) -> None: - """ - :param mapping: raw JSON to initialize the model. - :type mapping: Mapping[str, Any] - """ - - def __init__(self, *args: Any, **kwargs: Any) -> None: - super().__init__(*args, **kwargs) - self.odata_type = "#Microsoft.Skills.Vision.VectorizeSkill" # type: ignore - - class WebApiHttpHeaders(_Model): """A dictionary of http request headers.""" diff --git a/sdk/search/azure-search-documents/azure/search/documents/indexes/models/_patch.py b/sdk/search/azure-search-documents/azure/search/documents/indexes/models/_patch.py index 3d78f011e728..128bdd3bb4df 100644 --- a/sdk/search/azure-search-documents/azure/search/documents/indexes/models/_patch.py +++ b/sdk/search/azure-search-documents/azure/search/documents/indexes/models/_patch.py @@ -19,9 +19,6 @@ SplitSkillLanguage, TextTranslationSkillLanguage, ) -from ...knowledgebases.models import ( - KnowledgeRetrievalReasoningEffort, -) if TYPE_CHECKING: from ._models import ( @@ -32,7 +29,7 @@ SearchIndexerDataIdentity, SearchResourceEncryptionKey, ) - from ._enums import IndexerPermissionOption, SearchIndexerDataSourceType + from ._enums import SearchIndexerDataSourceType class SearchField(_SearchField): @@ -93,7 +90,6 @@ def __init__( container: "SearchIndexerDataContainer", description: 
Optional[str] = None, identity: Optional["SearchIndexerDataIdentity"] = None, - indexer_permission_options: Optional[List[Union[str, "IndexerPermissionOption"]]] = None, data_change_detection_policy: Optional["DataChangeDetectionPolicy"] = None, data_deletion_detection_policy: Optional["DataDeletionDetectionPolicy"] = None, e_tag: Optional[str] = None, @@ -110,7 +106,6 @@ def __init__( container: "SearchIndexerDataContainer", description: Optional[str] = None, identity: Optional["SearchIndexerDataIdentity"] = None, - indexer_permission_options: Optional[List[Union[str, "IndexerPermissionOption"]]] = None, data_change_detection_policy: Optional["DataChangeDetectionPolicy"] = None, data_deletion_detection_policy: Optional["DataDeletionDetectionPolicy"] = None, e_tag: Optional[str] = None, @@ -129,18 +124,10 @@ def __init__(self, *args: Any, **kwargs: Any) -> None: class KnowledgeBase(_KnowledgeBase): - """Represents a knowledge base definition. - - This class adds proper deserialization of the retrieval_reasoning_effort field - which uses discriminated polymorphism from the knowledgebases models. 
- """ + """Represents a knowledge base definition.""" def __init__(self, *args: Any, **kwargs: Any) -> None: super().__init__(*args, **kwargs) - # Properly deserialize retrieval_reasoning_effort if it's a dict - effort = self.retrieval_reasoning_effort - if effort is not None and isinstance(effort, dict): - self.retrieval_reasoning_effort = KnowledgeRetrievalReasoningEffort._deserialize(effort, []) def _collection_helper(typ: Any) -> str: diff --git a/sdk/search/azure-search-documents/azure/search/documents/knowledgebases/_client.py b/sdk/search/azure-search-documents/azure/search/documents/knowledgebases/_client.py index dc6edd562aa6..09be3b6eb7fa 100644 --- a/sdk/search/azure-search-documents/azure/search/documents/knowledgebases/_client.py +++ b/sdk/search/azure-search-documents/azure/search/documents/knowledgebases/_client.py @@ -26,15 +26,15 @@ class KnowledgeBaseRetrievalClient(_KnowledgeBaseRetrievalClientOperationsMixin): """KnowledgeBaseRetrievalClient. - :param endpoint: Service host. Required. + :param endpoint: The endpoint URL of the search service. Required. :type endpoint: str :param credential: Credential used to authenticate requests to the service. Is either a key credential type or a token credential type. Required. :type credential: ~azure.core.credentials.AzureKeyCredential or ~azure.core.credentials.TokenCredential - :keyword api_version: The API version to use for this operation. Known values are - "2025-11-01-preview" and None. Default value is "2025-11-01-preview". Note that overriding this - default value may result in unsupported behavior. + :keyword api_version: The API version to use for this operation. Known values are "2026-04-01" + and None. Default value is "2026-04-01". Note that overriding this default value may result in + unsupported behavior. 
:paramtype api_version: str """ diff --git a/sdk/search/azure-search-documents/azure/search/documents/knowledgebases/_configuration.py b/sdk/search/azure-search-documents/azure/search/documents/knowledgebases/_configuration.py index f7ab40eddeb8..6820fa6faf5b 100644 --- a/sdk/search/azure-search-documents/azure/search/documents/knowledgebases/_configuration.py +++ b/sdk/search/azure-search-documents/azure/search/documents/knowledgebases/_configuration.py @@ -23,20 +23,20 @@ class KnowledgeBaseRetrievalClientConfiguration: # pylint: disable=too-many-ins Note that all parameters used to create this instance are saved as instance attributes. - :param endpoint: Service host. Required. + :param endpoint: The endpoint URL of the search service. Required. :type endpoint: str :param credential: Credential used to authenticate requests to the service. Is either a key credential type or a token credential type. Required. :type credential: ~azure.core.credentials.AzureKeyCredential or ~azure.core.credentials.TokenCredential - :keyword api_version: The API version to use for this operation. Known values are - "2025-11-01-preview" and None. Default value is "2025-11-01-preview". Note that overriding this - default value may result in unsupported behavior. + :keyword api_version: The API version to use for this operation. Known values are "2026-04-01" + and None. Default value is "2026-04-01". Note that overriding this default value may result in + unsupported behavior. 
:paramtype api_version: str """ def __init__(self, endpoint: str, credential: Union[AzureKeyCredential, "TokenCredential"], **kwargs: Any) -> None: - api_version: str = kwargs.pop("api_version", "2025-11-01-preview") + api_version: str = kwargs.pop("api_version", "2026-04-01") if endpoint is None: raise ValueError("Parameter 'endpoint' must not be None.") diff --git a/sdk/search/azure-search-documents/azure/search/documents/knowledgebases/_operations/_operations.py b/sdk/search/azure-search-documents/azure/search/documents/knowledgebases/_operations/_operations.py index a583552685b7..3bfd42374859 100644 --- a/sdk/search/azure-search-documents/azure/search/documents/knowledgebases/_operations/_operations.py +++ b/sdk/search/azure-search-documents/azure/search/documents/knowledgebases/_operations/_operations.py @@ -5,7 +5,7 @@ # Code generated by Microsoft (R) Python Code Generator. # Changes may cause incorrect behavior and will be lost if the code is regenerated. # -------------------------------------------------------------------------- -from collections.abc import MutableMapping +from collections.abc import MutableMapping # pylint: disable=import-error from io import IOBase import json from typing import Any, Callable, IO, Optional, TypeVar, Union, overload @@ -42,13 +42,13 @@ def build_knowledge_base_retrieval_retrieve_request( # pylint: disable=name-too-long - knowledge_base_name: str, *, query_source_authorization: Optional[str] = None, **kwargs: Any + knowledge_base_name: str, **kwargs: Any ) -> HttpRequest: _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None)) - api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2025-11-01-preview")) + api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2026-04-01")) accept = _headers.pop("Accept", 
"application/json;odata.metadata=minimal") # Construct URL @@ -63,11 +63,8 @@ def build_knowledge_base_retrieval_retrieve_request( # pylint: disable=name-too _params["api-version"] = _SERIALIZER.query("api_version", api_version, "str") # Construct headers - _headers["Accept"] = _SERIALIZER.header("accept", accept, "str") - if query_source_authorization is not None: - _headers["x-ms-query-source-authorization"] = _SERIALIZER.header( - "query_source_authorization", query_source_authorization, "str" - ) + if accept is not None: + _headers["Accept"] = _SERIALIZER.header("accept", accept, "str") if content_type is not None: _headers["Content-Type"] = _SERIALIZER.header("content_type", content_type, "str") @@ -84,7 +81,6 @@ def retrieve( knowledge_base_name: str, retrieval_request: _models1.KnowledgeBaseRetrievalRequest, *, - query_source_authorization: Optional[str] = None, content_type: str = "application/json", **kwargs: Any ) -> _models1.KnowledgeBaseRetrievalResponse: @@ -95,10 +91,6 @@ def retrieve( :param retrieval_request: The retrieval request to process. Required. :type retrieval_request: ~azure.search.documents.knowledgebases.models.KnowledgeBaseRetrievalRequest - :keyword query_source_authorization: Token identifying the user for which the query is being - executed. This token is used to enforce security restrictions on documents. Default value is - None. - :paramtype query_source_authorization: str :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. Default value is "application/json". :paramtype content_type: str @@ -114,7 +106,6 @@ def retrieve( knowledge_base_name: str, retrieval_request: JSON, *, - query_source_authorization: Optional[str] = None, content_type: str = "application/json", **kwargs: Any ) -> _models1.KnowledgeBaseRetrievalResponse: @@ -124,10 +115,6 @@ def retrieve( :type knowledge_base_name: str :param retrieval_request: The retrieval request to process. Required. 
:type retrieval_request: JSON - :keyword query_source_authorization: Token identifying the user for which the query is being - executed. This token is used to enforce security restrictions on documents. Default value is - None. - :paramtype query_source_authorization: str :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. Default value is "application/json". :paramtype content_type: str @@ -143,7 +130,6 @@ def retrieve( knowledge_base_name: str, retrieval_request: IO[bytes], *, - query_source_authorization: Optional[str] = None, content_type: str = "application/json", **kwargs: Any ) -> _models1.KnowledgeBaseRetrievalResponse: @@ -153,10 +139,6 @@ def retrieve( :type knowledge_base_name: str :param retrieval_request: The retrieval request to process. Required. :type retrieval_request: IO[bytes] - :keyword query_source_authorization: Token identifying the user for which the query is being - executed. This token is used to enforce security restrictions on documents. Default value is - None. - :paramtype query_source_authorization: str :keyword content_type: Body Parameter content-type. Content type parameter for binary body. Default value is "application/json". :paramtype content_type: str @@ -171,8 +153,6 @@ def retrieve( self, knowledge_base_name: str, retrieval_request: Union[_models1.KnowledgeBaseRetrievalRequest, JSON, IO[bytes]], - *, - query_source_authorization: Optional[str] = None, **kwargs: Any ) -> _models1.KnowledgeBaseRetrievalResponse: """KnowledgeBase retrieves relevant data from backing stores. @@ -184,10 +164,6 @@ def retrieve( :type retrieval_request: ~azure.search.documents.knowledgebases.models.KnowledgeBaseRetrievalRequest or JSON or IO[bytes] - :keyword query_source_authorization: Token identifying the user for which the query is being - executed. This token is used to enforce security restrictions on documents. Default value is - None. 
- :paramtype query_source_authorization: str :return: KnowledgeBaseRetrievalResponse. The KnowledgeBaseRetrievalResponse is compatible with MutableMapping :rtype: ~azure.search.documents.knowledgebases.models.KnowledgeBaseRetrievalResponse @@ -216,7 +192,6 @@ def retrieve( _request = build_knowledge_base_retrieval_retrieve_request( knowledge_base_name=knowledge_base_name, - query_source_authorization=query_source_authorization, content_type=content_type, api_version=self._config.api_version, content=_content, @@ -228,6 +203,7 @@ def retrieve( } _request.url = self._client.format_url(_request.url, **path_format_arguments) + _decompress = kwargs.pop("decompress", True) _stream = kwargs.pop("stream", False) pipeline_response: PipelineResponse = self._client._pipeline.run( # pylint: disable=protected-access _request, stream=_stream, **kwargs @@ -249,7 +225,7 @@ def retrieve( raise HttpResponseError(response=response, model=error) if _stream: - deserialized = response.iter_bytes() + deserialized = response.iter_bytes() if _decompress else response.iter_raw() else: deserialized = _deserialize(_models1.KnowledgeBaseRetrievalResponse, response.json()) diff --git a/sdk/search/azure-search-documents/azure/search/documents/knowledgebases/_utils/model_base.py b/sdk/search/azure-search-documents/azure/search/documents/knowledgebases/_utils/model_base.py index c402af2afc63..f8511ab0c707 100644 --- a/sdk/search/azure-search-documents/azure/search/documents/knowledgebases/_utils/model_base.py +++ b/sdk/search/azure-search-documents/azure/search/documents/knowledgebases/_utils/model_base.py @@ -22,7 +22,7 @@ from datetime import datetime, date, time, timedelta, timezone from json import JSONEncoder import xml.etree.ElementTree as ET -from collections.abc import MutableMapping +from collections.abc import MutableMapping # pylint: disable=import-error from typing_extensions import Self import isodate from azure.core.exceptions import DeserializationError @@ -515,6 +515,8 @@ def 
setdefault(self, key: str, default: typing.Any = _UNSET) -> typing.Any: return self._data.setdefault(key, default) def __eq__(self, other: typing.Any) -> bool: + if isinstance(other, _MyMutableMapping): + return self._data == other._data try: other_model = self.__class__(other) except Exception: @@ -628,6 +630,9 @@ def __init__(self, *args: typing.Any, **kwargs: typing.Any) -> None: if len(items) > 0: existed_attr_keys.append(xml_name) dict_to_pass[rf._rest_name] = _deserialize(rf._type, items) + elif not rf._is_optional: + existed_attr_keys.append(xml_name) + dict_to_pass[rf._rest_name] = [] continue # text element is primitive type @@ -690,7 +695,7 @@ def __new__(cls, *args: typing.Any, **kwargs: typing.Any) -> Self: cls._attr_to_rest_field: dict[str, _RestField] = dict(attr_to_rest_field.items()) cls._calculated.add(f"{cls.__module__}.{cls.__qualname__}") - return super().__new__(cls) + return super().__new__(cls) # pylint: disable=no-value-for-parameter def __init_subclass__(cls, discriminator: typing.Optional[str] = None) -> None: for base in cls.__bases__: @@ -889,6 +894,8 @@ def _get_deserialize_callable_from_annotation( # pylint: disable=too-many-retur # is it optional? 
try: if any(a is _NONE_TYPE for a in annotation.__args__): # pyright: ignore + if rf: + rf._is_optional = True if len(annotation.__args__) <= 2: # pyright: ignore if_obj_deserializer = _get_deserialize_callable_from_annotation( next(a for a in annotation.__args__ if a is not _NONE_TYPE), module, rf # pyright: ignore @@ -981,16 +988,20 @@ def _deserialize_with_callable( return float(value.text) if value.text else None if deserializer is bool: return value.text == "true" if value.text else None + if deserializer and deserializer in _DESERIALIZE_MAPPING.values(): + return deserializer(value.text) if value.text else None + if deserializer and deserializer in _DESERIALIZE_MAPPING_WITHFORMAT.values(): + return deserializer(value.text) if value.text else None if deserializer is None: return value if deserializer in [int, float, bool]: return deserializer(value) if isinstance(deserializer, CaseInsensitiveEnumMeta): try: - return deserializer(value) + return deserializer(value.text if isinstance(value, ET.Element) else value) except ValueError: # for unknown value, return raw value - return value + return value.text if isinstance(value, ET.Element) else value if isinstance(deserializer, type) and issubclass(deserializer, Model): return deserializer._deserialize(value, []) return typing.cast(typing.Callable[[typing.Any], typing.Any], deserializer)(value) @@ -1043,6 +1054,7 @@ def _failsafe_deserialize_xml( return None +# pylint: disable=too-many-instance-attributes class _RestField: def __init__( self, @@ -1062,6 +1074,7 @@ def __init__( self._is_discriminator = is_discriminator self._visibility = visibility self._is_model = False + self._is_optional = False self._default = default self._format = format self._is_multipart_file_input = is_multipart_file_input diff --git a/sdk/search/azure-search-documents/azure/search/documents/knowledgebases/aio/_client.py b/sdk/search/azure-search-documents/azure/search/documents/knowledgebases/aio/_client.py index 
3e7834244a6a..085ecb8d75a3 100644 --- a/sdk/search/azure-search-documents/azure/search/documents/knowledgebases/aio/_client.py +++ b/sdk/search/azure-search-documents/azure/search/documents/knowledgebases/aio/_client.py @@ -26,15 +26,15 @@ class KnowledgeBaseRetrievalClient(_KnowledgeBaseRetrievalClientOperationsMixin): """KnowledgeBaseRetrievalClient. - :param endpoint: Service host. Required. + :param endpoint: The endpoint URL of the search service. Required. :type endpoint: str :param credential: Credential used to authenticate requests to the service. Is either a key credential type or a token credential type. Required. :type credential: ~azure.core.credentials.AzureKeyCredential or ~azure.core.credentials_async.AsyncTokenCredential - :keyword api_version: The API version to use for this operation. Known values are - "2025-11-01-preview" and None. Default value is "2025-11-01-preview". Note that overriding this - default value may result in unsupported behavior. + :keyword api_version: The API version to use for this operation. Known values are "2026-04-01" + and None. Default value is "2026-04-01". Note that overriding this default value may result in + unsupported behavior. :paramtype api_version: str """ diff --git a/sdk/search/azure-search-documents/azure/search/documents/knowledgebases/aio/_configuration.py b/sdk/search/azure-search-documents/azure/search/documents/knowledgebases/aio/_configuration.py index 81dada9e9ccb..5b03df359c71 100644 --- a/sdk/search/azure-search-documents/azure/search/documents/knowledgebases/aio/_configuration.py +++ b/sdk/search/azure-search-documents/azure/search/documents/knowledgebases/aio/_configuration.py @@ -23,22 +23,22 @@ class KnowledgeBaseRetrievalClientConfiguration: # pylint: disable=too-many-ins Note that all parameters used to create this instance are saved as instance attributes. - :param endpoint: Service host. Required. + :param endpoint: The endpoint URL of the search service. Required. 
:type endpoint: str :param credential: Credential used to authenticate requests to the service. Is either a key credential type or a token credential type. Required. :type credential: ~azure.core.credentials.AzureKeyCredential or ~azure.core.credentials_async.AsyncTokenCredential - :keyword api_version: The API version to use for this operation. Known values are - "2025-11-01-preview" and None. Default value is "2025-11-01-preview". Note that overriding this - default value may result in unsupported behavior. + :keyword api_version: The API version to use for this operation. Known values are "2026-04-01" + and None. Default value is "2026-04-01". Note that overriding this default value may result in + unsupported behavior. :paramtype api_version: str """ def __init__( self, endpoint: str, credential: Union[AzureKeyCredential, "AsyncTokenCredential"], **kwargs: Any ) -> None: - api_version: str = kwargs.pop("api_version", "2025-11-01-preview") + api_version: str = kwargs.pop("api_version", "2026-04-01") if endpoint is None: raise ValueError("Parameter 'endpoint' must not be None.") diff --git a/sdk/search/azure-search-documents/azure/search/documents/knowledgebases/aio/_operations/_operations.py b/sdk/search/azure-search-documents/azure/search/documents/knowledgebases/aio/_operations/_operations.py index 4ebadf1ddfc9..03cd83e2cf8e 100644 --- a/sdk/search/azure-search-documents/azure/search/documents/knowledgebases/aio/_operations/_operations.py +++ b/sdk/search/azure-search-documents/azure/search/documents/knowledgebases/aio/_operations/_operations.py @@ -6,7 +6,7 @@ # Code generated by Microsoft (R) Python Code Generator. # Changes may cause incorrect behavior and will be lost if the code is regenerated. 
# -------------------------------------------------------------------------- -from collections.abc import MutableMapping +from collections.abc import MutableMapping # pylint: disable=import-error from io import IOBase import json from typing import Any, Callable, IO, Optional, TypeVar, Union, overload @@ -49,7 +49,6 @@ async def retrieve( knowledge_base_name: str, retrieval_request: _models2.KnowledgeBaseRetrievalRequest, *, - query_source_authorization: Optional[str] = None, content_type: str = "application/json", **kwargs: Any ) -> _models2.KnowledgeBaseRetrievalResponse: @@ -60,10 +59,6 @@ async def retrieve( :param retrieval_request: The retrieval request to process. Required. :type retrieval_request: ~azure.search.documents.knowledgebases.models.KnowledgeBaseRetrievalRequest - :keyword query_source_authorization: Token identifying the user for which the query is being - executed. This token is used to enforce security restrictions on documents. Default value is - None. - :paramtype query_source_authorization: str :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. Default value is "application/json". :paramtype content_type: str @@ -79,7 +74,6 @@ async def retrieve( knowledge_base_name: str, retrieval_request: JSON, *, - query_source_authorization: Optional[str] = None, content_type: str = "application/json", **kwargs: Any ) -> _models2.KnowledgeBaseRetrievalResponse: @@ -89,10 +83,6 @@ async def retrieve( :type knowledge_base_name: str :param retrieval_request: The retrieval request to process. Required. :type retrieval_request: JSON - :keyword query_source_authorization: Token identifying the user for which the query is being - executed. This token is used to enforce security restrictions on documents. Default value is - None. - :paramtype query_source_authorization: str :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. Default value is "application/json". 
:paramtype content_type: str @@ -108,7 +98,6 @@ async def retrieve( knowledge_base_name: str, retrieval_request: IO[bytes], *, - query_source_authorization: Optional[str] = None, content_type: str = "application/json", **kwargs: Any ) -> _models2.KnowledgeBaseRetrievalResponse: @@ -118,10 +107,6 @@ async def retrieve( :type knowledge_base_name: str :param retrieval_request: The retrieval request to process. Required. :type retrieval_request: IO[bytes] - :keyword query_source_authorization: Token identifying the user for which the query is being - executed. This token is used to enforce security restrictions on documents. Default value is - None. - :paramtype query_source_authorization: str :keyword content_type: Body Parameter content-type. Content type parameter for binary body. Default value is "application/json". :paramtype content_type: str @@ -136,8 +121,6 @@ async def retrieve( self, knowledge_base_name: str, retrieval_request: Union[_models2.KnowledgeBaseRetrievalRequest, JSON, IO[bytes]], - *, - query_source_authorization: Optional[str] = None, **kwargs: Any ) -> _models2.KnowledgeBaseRetrievalResponse: """KnowledgeBase retrieves relevant data from backing stores. @@ -149,10 +132,6 @@ async def retrieve( :type retrieval_request: ~azure.search.documents.knowledgebases.models.KnowledgeBaseRetrievalRequest or JSON or IO[bytes] - :keyword query_source_authorization: Token identifying the user for which the query is being - executed. This token is used to enforce security restrictions on documents. Default value is - None. - :paramtype query_source_authorization: str :return: KnowledgeBaseRetrievalResponse. 
The KnowledgeBaseRetrievalResponse is compatible with MutableMapping :rtype: ~azure.search.documents.knowledgebases.models.KnowledgeBaseRetrievalResponse @@ -181,7 +160,6 @@ async def retrieve( _request = build_knowledge_base_retrieval_retrieve_request( knowledge_base_name=knowledge_base_name, - query_source_authorization=query_source_authorization, content_type=content_type, api_version=self._config.api_version, content=_content, @@ -193,6 +171,7 @@ async def retrieve( } _request.url = self._client.format_url(_request.url, **path_format_arguments) + _decompress = kwargs.pop("decompress", True) _stream = kwargs.pop("stream", False) pipeline_response: PipelineResponse = await self._client._pipeline.run( # type: ignore # pylint: disable=protected-access _request, stream=_stream, **kwargs @@ -214,7 +193,7 @@ async def retrieve( raise HttpResponseError(response=response, model=error) if _stream: - deserialized = response.iter_bytes() + deserialized = response.iter_bytes() if _decompress else response.iter_raw() else: deserialized = _deserialize(_models2.KnowledgeBaseRetrievalResponse, response.json()) diff --git a/sdk/search/azure-search-documents/azure/search/documents/knowledgebases/models/__init__.py b/sdk/search/azure-search-documents/azure/search/documents/knowledgebases/models/__init__.py index d232f3747a10..f1ad9d4a69a4 100644 --- a/sdk/search/azure-search-documents/azure/search/documents/knowledgebases/models/__init__.py +++ b/sdk/search/azure-search-documents/azure/search/documents/knowledgebases/models/__init__.py @@ -18,7 +18,6 @@ AzureBlobKnowledgeSourceParams, CompletedSynchronizationState, IndexedOneLakeKnowledgeSourceParams, - IndexedSharePointKnowledgeSourceParams, KnowledgeBaseActivityRecord, KnowledgeBaseAgenticReasoningActivityRecord, KnowledgeBaseAzureBlobReference, @@ -26,22 +25,16 @@ KnowledgeBaseErrorDetail, KnowledgeBaseImageContent, KnowledgeBaseIndexedOneLakeReference, - KnowledgeBaseIndexedSharePointReference, KnowledgeBaseMessage, 
KnowledgeBaseMessageContent, KnowledgeBaseMessageImageContent, KnowledgeBaseMessageTextContent, - KnowledgeBaseModelAnswerSynthesisActivityRecord, - KnowledgeBaseModelQueryPlanningActivityRecord, KnowledgeBaseReference, - KnowledgeBaseRemoteSharePointReference, KnowledgeBaseRetrievalRequest, KnowledgeBaseRetrievalResponse, KnowledgeBaseSearchIndexReference, KnowledgeBaseWebReference, KnowledgeRetrievalIntent, - KnowledgeRetrievalLowReasoningEffort, - KnowledgeRetrievalMediumReasoningEffort, KnowledgeRetrievalMinimalReasoningEffort, KnowledgeRetrievalReasoningEffort, KnowledgeRetrievalSemanticIntent, @@ -50,10 +43,9 @@ KnowledgeSourceParams, KnowledgeSourceStatistics, KnowledgeSourceStatus, + KnowledgeSourceSynchronizationError, KnowledgeSourceVectorizer, - RemoteSharePointKnowledgeSourceParams, SearchIndexKnowledgeSourceParams, - SharePointSensitivityLabelInfo, SynchronizationState, WebKnowledgeSourceParams, ) @@ -63,7 +55,6 @@ KnowledgeBaseMessageContentType, KnowledgeBaseReferenceType, KnowledgeRetrievalIntentType, - KnowledgeRetrievalOutputMode, KnowledgeRetrievalReasoningEffortKind, ) from ._patch import __all__ as _patch_all @@ -75,7 +66,6 @@ "AzureBlobKnowledgeSourceParams", "CompletedSynchronizationState", "IndexedOneLakeKnowledgeSourceParams", - "IndexedSharePointKnowledgeSourceParams", "KnowledgeBaseActivityRecord", "KnowledgeBaseAgenticReasoningActivityRecord", "KnowledgeBaseAzureBlobReference", @@ -83,22 +73,16 @@ "KnowledgeBaseErrorDetail", "KnowledgeBaseImageContent", "KnowledgeBaseIndexedOneLakeReference", - "KnowledgeBaseIndexedSharePointReference", "KnowledgeBaseMessage", "KnowledgeBaseMessageContent", "KnowledgeBaseMessageImageContent", "KnowledgeBaseMessageTextContent", - "KnowledgeBaseModelAnswerSynthesisActivityRecord", - "KnowledgeBaseModelQueryPlanningActivityRecord", "KnowledgeBaseReference", - "KnowledgeBaseRemoteSharePointReference", "KnowledgeBaseRetrievalRequest", "KnowledgeBaseRetrievalResponse", "KnowledgeBaseSearchIndexReference", 
"KnowledgeBaseWebReference", "KnowledgeRetrievalIntent", - "KnowledgeRetrievalLowReasoningEffort", - "KnowledgeRetrievalMediumReasoningEffort", "KnowledgeRetrievalMinimalReasoningEffort", "KnowledgeRetrievalReasoningEffort", "KnowledgeRetrievalSemanticIntent", @@ -107,17 +91,15 @@ "KnowledgeSourceParams", "KnowledgeSourceStatistics", "KnowledgeSourceStatus", + "KnowledgeSourceSynchronizationError", "KnowledgeSourceVectorizer", - "RemoteSharePointKnowledgeSourceParams", "SearchIndexKnowledgeSourceParams", - "SharePointSensitivityLabelInfo", "SynchronizationState", "WebKnowledgeSourceParams", "KnowledgeBaseActivityRecordType", "KnowledgeBaseMessageContentType", "KnowledgeBaseReferenceType", "KnowledgeRetrievalIntentType", - "KnowledgeRetrievalOutputMode", "KnowledgeRetrievalReasoningEffortKind", ] __all__.extend([p for p in _patch_all if p not in __all__]) # pyright: ignore diff --git a/sdk/search/azure-search-documents/azure/search/documents/knowledgebases/models/_enums.py b/sdk/search/azure-search-documents/azure/search/documents/knowledgebases/models/_enums.py index 5aa3bfa8f1bc..a48ad712217f 100644 --- a/sdk/search/azure-search-documents/azure/search/documents/knowledgebases/models/_enums.py +++ b/sdk/search/azure-search-documents/azure/search/documents/knowledgebases/models/_enums.py @@ -17,18 +17,10 @@ class KnowledgeBaseActivityRecordType(str, Enum, metaclass=CaseInsensitiveEnumMe """Search index retrieval activity.""" AZURE_BLOB = "azureBlob" """Azure Blob retrieval activity.""" - INDEXED_SHARE_POINT = "indexedSharePoint" - """Indexed SharePoint retrieval activity.""" INDEXED_ONE_LAKE = "indexedOneLake" """Indexed OneLake retrieval activity.""" WEB = "web" """Web retrieval activity.""" - REMOTE_SHARE_POINT = "remoteSharePoint" - """Remote SharePoint retrieval activity.""" - MODEL_QUERY_PLANNING = "modelQueryPlanning" - """LLM query planning activity.""" - MODEL_ANSWER_SYNTHESIS = "modelAnswerSynthesis" - """LLM answer synthesis activity.""" AGENTIC_REASONING 
= "agenticReasoning" """Agentic reasoning activity.""" @@ -49,14 +41,10 @@ class KnowledgeBaseReferenceType(str, Enum, metaclass=CaseInsensitiveEnumMeta): """Search index document reference.""" AZURE_BLOB = "azureBlob" """Azure Blob document reference.""" - INDEXED_SHARE_POINT = "indexedSharePoint" - """Indexed SharePoint document reference.""" INDEXED_ONE_LAKE = "indexedOneLake" """Indexed OneLake document reference.""" WEB = "web" """Web document reference.""" - REMOTE_SHARE_POINT = "remoteSharePoint" - """Remote SharePoint document reference.""" class KnowledgeRetrievalIntentType(str, Enum, metaclass=CaseInsensitiveEnumMeta): @@ -66,21 +54,8 @@ class KnowledgeRetrievalIntentType(str, Enum, metaclass=CaseInsensitiveEnumMeta) """A natural language semantic query intent.""" -class KnowledgeRetrievalOutputMode(str, Enum, metaclass=CaseInsensitiveEnumMeta): - """The output configuration for this retrieval.""" - - EXTRACTIVE_DATA = "extractiveData" - """Return data from the knowledge sources directly without generative alteration.""" - ANSWER_SYNTHESIS = "answerSynthesis" - """Synthesize an answer for the response payload.""" - - class KnowledgeRetrievalReasoningEffortKind(str, Enum, metaclass=CaseInsensitiveEnumMeta): """The amount of effort to use during retrieval.""" MINIMAL = "minimal" """Does not perform any source selections, query planning, or iterative search.""" - LOW = "low" - """Use low reasoning during retrieval.""" - MEDIUM = "medium" - """Use a moderate amount of reasoning during retrieval.""" diff --git a/sdk/search/azure-search-documents/azure/search/documents/knowledgebases/models/_models.py b/sdk/search/azure-search-documents/azure/search/documents/knowledgebases/models/_models.py index 934420f67cd3..ea542126d196 100644 --- a/sdk/search/azure-search-documents/azure/search/documents/knowledgebases/models/_models.py +++ b/sdk/search/azure-search-documents/azure/search/documents/knowledgebases/models/_models.py @@ -64,7 +64,6 @@ class 
KnowledgeSourceParams(_Model): You probably want to use the sub-classes and not this class directly. Known sub-classes are: AzureBlobKnowledgeSourceParams, IndexedOneLakeKnowledgeSourceParams, - IndexedSharePointKnowledgeSourceParams, RemoteSharePointKnowledgeSourceParams, SearchIndexKnowledgeSourceParams, WebKnowledgeSourceParams :ivar knowledge_source_name: The name of the index the params apply to. Required. @@ -75,14 +74,11 @@ class KnowledgeSourceParams(_Model): :ivar include_reference_source_data: Indicates whether references should include the structured data obtained during retrieval in their payload. :vartype include_reference_source_data: bool - :ivar always_query_source: Indicates that this knowledge source should bypass source selection - and always be queried at retrieval time. - :vartype always_query_source: bool :ivar reranker_threshold: The reranker threshold all retrieved documents must meet to be included in the response. :vartype reranker_threshold: float :ivar kind: The type of the knowledge source. Required. Known values are: "searchIndex", - "azureBlob", "indexedSharePoint", "indexedOneLake", "web", and "remoteSharePoint". + "azureBlob", "indexedOneLake", and "web". 
:vartype kind: str or ~azure.search.documents.indexes.models.KnowledgeSourceKind """ @@ -100,18 +96,13 @@ class KnowledgeSourceParams(_Model): ) """Indicates whether references should include the structured data obtained during retrieval in their payload.""" - always_query_source: Optional[bool] = rest_field( - name="alwaysQuerySource", visibility=["read", "create", "update", "delete", "query"] - ) - """Indicates that this knowledge source should bypass source selection and always be queried at - retrieval time.""" reranker_threshold: Optional[float] = rest_field( name="rerankerThreshold", visibility=["read", "create", "update", "delete", "query"] ) """The reranker threshold all retrieved documents must meet to be included in the response.""" kind: str = rest_discriminator(name="kind", visibility=["read", "create", "update", "delete", "query"]) """The type of the knowledge source. Required. Known values are: \"searchIndex\", \"azureBlob\", - \"indexedSharePoint\", \"indexedOneLake\", \"web\", and \"remoteSharePoint\".""" + \"indexedOneLake\", and \"web\".""" @overload def __init__( @@ -121,7 +112,6 @@ def __init__( kind: str, include_references: Optional[bool] = None, include_reference_source_data: Optional[bool] = None, - always_query_source: Optional[bool] = None, reranker_threshold: Optional[float] = None, ) -> None: ... @@ -147,9 +137,6 @@ class AzureBlobKnowledgeSourceParams(KnowledgeSourceParams, discriminator="azure :ivar include_reference_source_data: Indicates whether references should include the structured data obtained during retrieval in their payload. :vartype include_reference_source_data: bool - :ivar always_query_source: Indicates that this knowledge source should bypass source selection - and always be queried at retrieval time. - :vartype always_query_source: bool :ivar reranker_threshold: The reranker threshold all retrieved documents must meet to be included in the response. 
:vartype reranker_threshold: float @@ -169,7 +156,6 @@ def __init__( knowledge_source_name: str, include_references: Optional[bool] = None, include_reference_source_data: Optional[bool] = None, - always_query_source: Optional[bool] = None, reranker_threshold: Optional[float] = None, ) -> None: ... @@ -254,9 +240,6 @@ class IndexedOneLakeKnowledgeSourceParams(KnowledgeSourceParams, discriminator=" :ivar include_reference_source_data: Indicates whether references should include the structured data obtained during retrieval in their payload. :vartype include_reference_source_data: bool - :ivar always_query_source: Indicates that this knowledge source should bypass source selection - and always be queried at retrieval time. - :vartype always_query_source: bool :ivar reranker_threshold: The reranker threshold all retrieved documents must meet to be included in the response. :vartype reranker_threshold: float @@ -275,7 +258,6 @@ def __init__( knowledge_source_name: str, include_references: Optional[bool] = None, include_reference_source_data: Optional[bool] = None, - always_query_source: Optional[bool] = None, reranker_threshold: Optional[float] = None, ) -> None: ... @@ -291,67 +273,17 @@ def __init__(self, *args: Any, **kwargs: Any) -> None: self.kind = KnowledgeSourceKind.INDEXED_ONE_LAKE # type: ignore -class IndexedSharePointKnowledgeSourceParams(KnowledgeSourceParams, discriminator="indexedSharePoint"): - """Specifies runtime parameters for a indexed SharePoint knowledge source. - - :ivar knowledge_source_name: The name of the index the params apply to. Required. - :vartype knowledge_source_name: str - :ivar include_references: Indicates whether references should be included for data retrieved - from this source. - :vartype include_references: bool - :ivar include_reference_source_data: Indicates whether references should include the structured - data obtained during retrieval in their payload. 
- :vartype include_reference_source_data: bool - :ivar always_query_source: Indicates that this knowledge source should bypass source selection - and always be queried at retrieval time. - :vartype always_query_source: bool - :ivar reranker_threshold: The reranker threshold all retrieved documents must meet to be - included in the response. - :vartype reranker_threshold: float - :ivar kind: The discriminator value. Required. A knowledge source that reads data from indexed - SharePoint. - :vartype kind: str or ~azure.search.documents.indexes.models.INDEXED_SHARE_POINT - """ - - kind: Literal[KnowledgeSourceKind.INDEXED_SHARE_POINT] = rest_discriminator(name="kind", visibility=["read", "create", "update", "delete", "query"]) # type: ignore - """The discriminator value. Required. A knowledge source that reads data from indexed SharePoint.""" - - @overload - def __init__( - self, - *, - knowledge_source_name: str, - include_references: Optional[bool] = None, - include_reference_source_data: Optional[bool] = None, - always_query_source: Optional[bool] = None, - reranker_threshold: Optional[float] = None, - ) -> None: ... - - @overload - def __init__(self, mapping: Mapping[str, Any]) -> None: - """ - :param mapping: raw JSON to initialize the model. - :type mapping: Mapping[str, Any] - """ - - def __init__(self, *args: Any, **kwargs: Any) -> None: - super().__init__(*args, **kwargs) - self.kind = KnowledgeSourceKind.INDEXED_SHARE_POINT # type: ignore - - class KnowledgeBaseActivityRecord(_Model): """Base type for activity records. Tracks execution details, timing, and errors for knowledge base operations. You probably want to use the sub-classes and not this class directly. Known sub-classes are: - KnowledgeBaseAgenticReasoningActivityRecord, KnowledgeBaseModelAnswerSynthesisActivityRecord, - KnowledgeBaseModelQueryPlanningActivityRecord + KnowledgeBaseAgenticReasoningActivityRecord :ivar id: The ID of the activity record. Required. 
:vartype id: int :ivar type: The type of the activity record. Required. Known values are: "searchIndex", - "azureBlob", "indexedSharePoint", "indexedOneLake", "web", "remoteSharePoint", - "modelQueryPlanning", "modelAnswerSynthesis", and "agenticReasoning". + "azureBlob", "indexedOneLake", "web", and "agenticReasoning". :vartype type: str or ~azure.search.documents.knowledgebases.models.KnowledgeBaseActivityRecordType :ivar elapsed_ms: The elapsed time in milliseconds for the retrieval activity. @@ -366,8 +298,7 @@ class KnowledgeBaseActivityRecord(_Model): """The ID of the activity record. Required.""" type: str = rest_discriminator(name="type", visibility=["read", "create", "update", "delete", "query"]) """The type of the activity record. Required. Known values are: \"searchIndex\", \"azureBlob\", - \"indexedSharePoint\", \"indexedOneLake\", \"web\", \"remoteSharePoint\", - \"modelQueryPlanning\", \"modelAnswerSynthesis\", and \"agenticReasoning\".""" + \"indexedOneLake\", \"web\", and \"agenticReasoning\".""" elapsed_ms: Optional[int] = rest_field(name="elapsedMs", visibility=["read", "create", "update", "delete", "query"]) """The elapsed time in milliseconds for the retrieval activity.""" error: Optional["_models.KnowledgeBaseErrorDetail"] = rest_field( @@ -457,11 +388,10 @@ class KnowledgeBaseReference(_Model): You probably want to use the sub-classes and not this class directly. Known sub-classes are: KnowledgeBaseAzureBlobReference, KnowledgeBaseIndexedOneLakeReference, - KnowledgeBaseIndexedSharePointReference, KnowledgeBaseRemoteSharePointReference, KnowledgeBaseSearchIndexReference, KnowledgeBaseWebReference :ivar type: The type of the reference. Required. Known values are: "searchIndex", "azureBlob", - "indexedSharePoint", "indexedOneLake", "web", and "remoteSharePoint". + "indexedOneLake", and "web". :vartype type: str or ~azure.search.documents.knowledgebases.models.KnowledgeBaseReferenceType :ivar id: The ID of the reference. Required. 
:vartype id: str @@ -476,7 +406,7 @@ class KnowledgeBaseReference(_Model): __mapping__: dict[str, _Model] = {} type: str = rest_discriminator(name="type", visibility=["read", "create", "update", "delete", "query"]) """The type of the reference. Required. Known values are: \"searchIndex\", \"azureBlob\", - \"indexedSharePoint\", \"indexedOneLake\", \"web\", and \"remoteSharePoint\".""" + \"indexedOneLake\", and \"web\".""" id: str = rest_field(visibility=["read", "create", "update", "delete", "query"]) """The ID of the reference. Required.""" activity_source: int = rest_field(name="activitySource", visibility=["read", "create", "update", "delete", "query"]) @@ -675,51 +605,6 @@ def __init__(self, *args: Any, **kwargs: Any) -> None: self.type = KnowledgeBaseReferenceType.INDEXED_ONE_LAKE # type: ignore -class KnowledgeBaseIndexedSharePointReference(KnowledgeBaseReference, discriminator="indexedSharePoint"): - """Represents an indexed SharePoint document reference. - - :ivar id: The ID of the reference. Required. - :vartype id: str - :ivar activity_source: The source activity ID for the reference. Required. - :vartype activity_source: int - :ivar source_data: The source data for the reference. - :vartype source_data: dict[str, any] - :ivar reranker_score: The reranker score for the document reference. - :vartype reranker_score: float - :ivar type: The discriminator value. Required. Indexed SharePoint document reference. - :vartype type: str or ~azure.search.documents.knowledgebases.models.INDEXED_SHARE_POINT - :ivar doc_url: The document URL for the reference. - :vartype doc_url: str - """ - - type: Literal[KnowledgeBaseReferenceType.INDEXED_SHARE_POINT] = rest_discriminator(name="type", visibility=["read", "create", "update", "delete", "query"]) # type: ignore - """The discriminator value. Required. 
Indexed SharePoint document reference.""" - doc_url: Optional[str] = rest_field(name="docUrl", visibility=["read", "create", "update", "delete", "query"]) - """The document URL for the reference.""" - - @overload - def __init__( - self, - *, - id: str, # pylint: disable=redefined-builtin - activity_source: int, - source_data: Optional[dict[str, Any]] = None, - reranker_score: Optional[float] = None, - doc_url: Optional[str] = None, - ) -> None: ... - - @overload - def __init__(self, mapping: Mapping[str, Any]) -> None: - """ - :param mapping: raw JSON to initialize the model. - :type mapping: Mapping[str, Any] - """ - - def __init__(self, *args: Any, **kwargs: Any) -> None: - super().__init__(*args, **kwargs) - self.type = KnowledgeBaseReferenceType.INDEXED_SHARE_POINT # type: ignore - - class KnowledgeBaseMessage(_Model): """The natural language message style object. @@ -855,197 +740,22 @@ def __init__(self, *args: Any, **kwargs: Any) -> None: self.type = KnowledgeBaseMessageContentType.TEXT # type: ignore -class KnowledgeBaseModelAnswerSynthesisActivityRecord( - KnowledgeBaseActivityRecord, discriminator="modelAnswerSynthesis" -): # pylint: disable=name-too-long - """Represents an LLM answer synthesis activity record. - - :ivar id: The ID of the activity record. Required. - :vartype id: int - :ivar elapsed_ms: The elapsed time in milliseconds for the retrieval activity. - :vartype elapsed_ms: int - :ivar error: The error detail explaining why the operation failed. This property is only - included when the activity does not succeed. - :vartype error: ~azure.search.documents.knowledgebases.models.KnowledgeBaseErrorDetail - :ivar type: The discriminator value. Required. LLM answer synthesis activity. - :vartype type: str or ~azure.search.documents.knowledgebases.models.MODEL_ANSWER_SYNTHESIS - :ivar input_tokens: The number of input tokens for the LLM answer synthesis activity. 
- :vartype input_tokens: int - :ivar output_tokens: The number of output tokens for the LLM answer synthesis activity. - :vartype output_tokens: int - """ - - type: Literal[KnowledgeBaseActivityRecordType.MODEL_ANSWER_SYNTHESIS] = rest_discriminator(name="type", visibility=["read", "create", "update", "delete", "query"]) # type: ignore - """The discriminator value. Required. LLM answer synthesis activity.""" - input_tokens: Optional[int] = rest_field( - name="inputTokens", visibility=["read", "create", "update", "delete", "query"] - ) - """The number of input tokens for the LLM answer synthesis activity.""" - output_tokens: Optional[int] = rest_field( - name="outputTokens", visibility=["read", "create", "update", "delete", "query"] - ) - """The number of output tokens for the LLM answer synthesis activity.""" - - @overload - def __init__( - self, - *, - id: int, # pylint: disable=redefined-builtin - elapsed_ms: Optional[int] = None, - error: Optional["_models.KnowledgeBaseErrorDetail"] = None, - input_tokens: Optional[int] = None, - output_tokens: Optional[int] = None, - ) -> None: ... - - @overload - def __init__(self, mapping: Mapping[str, Any]) -> None: - """ - :param mapping: raw JSON to initialize the model. - :type mapping: Mapping[str, Any] - """ - - def __init__(self, *args: Any, **kwargs: Any) -> None: - super().__init__(*args, **kwargs) - self.type = KnowledgeBaseActivityRecordType.MODEL_ANSWER_SYNTHESIS # type: ignore - - -class KnowledgeBaseModelQueryPlanningActivityRecord( - KnowledgeBaseActivityRecord, discriminator="modelQueryPlanning" -): # pylint: disable=name-too-long - """Represents an LLM query planning activity record. - - :ivar id: The ID of the activity record. Required. - :vartype id: int - :ivar elapsed_ms: The elapsed time in milliseconds for the retrieval activity. - :vartype elapsed_ms: int - :ivar error: The error detail explaining why the operation failed. This property is only - included when the activity does not succeed. 
- :vartype error: ~azure.search.documents.knowledgebases.models.KnowledgeBaseErrorDetail - :ivar type: The discriminator value. Required. LLM query planning activity. - :vartype type: str or ~azure.search.documents.knowledgebases.models.MODEL_QUERY_PLANNING - :ivar input_tokens: The number of input tokens for the LLM query planning activity. - :vartype input_tokens: int - :ivar output_tokens: The number of output tokens for the LLM query planning activity. - :vartype output_tokens: int - """ - - type: Literal[KnowledgeBaseActivityRecordType.MODEL_QUERY_PLANNING] = rest_discriminator(name="type", visibility=["read", "create", "update", "delete", "query"]) # type: ignore - """The discriminator value. Required. LLM query planning activity.""" - input_tokens: Optional[int] = rest_field( - name="inputTokens", visibility=["read", "create", "update", "delete", "query"] - ) - """The number of input tokens for the LLM query planning activity.""" - output_tokens: Optional[int] = rest_field( - name="outputTokens", visibility=["read", "create", "update", "delete", "query"] - ) - """The number of output tokens for the LLM query planning activity.""" - - @overload - def __init__( - self, - *, - id: int, # pylint: disable=redefined-builtin - elapsed_ms: Optional[int] = None, - error: Optional["_models.KnowledgeBaseErrorDetail"] = None, - input_tokens: Optional[int] = None, - output_tokens: Optional[int] = None, - ) -> None: ... - - @overload - def __init__(self, mapping: Mapping[str, Any]) -> None: - """ - :param mapping: raw JSON to initialize the model. - :type mapping: Mapping[str, Any] - """ - - def __init__(self, *args: Any, **kwargs: Any) -> None: - super().__init__(*args, **kwargs) - self.type = KnowledgeBaseActivityRecordType.MODEL_QUERY_PLANNING # type: ignore - - -class KnowledgeBaseRemoteSharePointReference(KnowledgeBaseReference, discriminator="remoteSharePoint"): - """Represents a remote SharePoint document reference. - - :ivar id: The ID of the reference. Required. 
- :vartype id: str - :ivar activity_source: The source activity ID for the reference. Required. - :vartype activity_source: int - :ivar source_data: The source data for the reference. - :vartype source_data: dict[str, any] - :ivar reranker_score: The reranker score for the document reference. - :vartype reranker_score: float - :ivar type: The discriminator value. Required. Remote SharePoint document reference. - :vartype type: str or ~azure.search.documents.knowledgebases.models.REMOTE_SHARE_POINT - :ivar web_url: The url the reference data originated from. - :vartype web_url: str - :ivar search_sensitivity_label_info: Information about the sensitivity label applied to the - SharePoint document. - :vartype search_sensitivity_label_info: - ~azure.search.documents.knowledgebases.models.SharePointSensitivityLabelInfo - """ - - type: Literal[KnowledgeBaseReferenceType.REMOTE_SHARE_POINT] = rest_discriminator(name="type", visibility=["read", "create", "update", "delete", "query"]) # type: ignore - """The discriminator value. Required. Remote SharePoint document reference.""" - web_url: Optional[str] = rest_field(name="webUrl", visibility=["read", "create", "update", "delete", "query"]) - """The url the reference data originated from.""" - search_sensitivity_label_info: Optional["_models.SharePointSensitivityLabelInfo"] = rest_field( - name="searchSensitivityLabelInfo", visibility=["read", "create", "update", "delete", "query"] - ) - """Information about the sensitivity label applied to the SharePoint document.""" - - @overload - def __init__( - self, - *, - id: str, # pylint: disable=redefined-builtin - activity_source: int, - source_data: Optional[dict[str, Any]] = None, - reranker_score: Optional[float] = None, - web_url: Optional[str] = None, - search_sensitivity_label_info: Optional["_models.SharePointSensitivityLabelInfo"] = None, - ) -> None: ... 
- - @overload - def __init__(self, mapping: Mapping[str, Any]) -> None: - """ - :param mapping: raw JSON to initialize the model. - :type mapping: Mapping[str, Any] - """ - - def __init__(self, *args: Any, **kwargs: Any) -> None: - super().__init__(*args, **kwargs) - self.type = KnowledgeBaseReferenceType.REMOTE_SHARE_POINT # type: ignore - - class KnowledgeBaseRetrievalRequest(_Model): """The input contract for the retrieval request. - :ivar messages: A list of chat message style input. - :vartype messages: list[~azure.search.documents.knowledgebases.models.KnowledgeBaseMessage] :ivar intents: A list of intended queries to execute without model query planning. :vartype intents: list[~azure.search.documents.knowledgebases.models.KnowledgeRetrievalIntent] :ivar max_runtime_in_seconds: The maximum runtime in seconds. :vartype max_runtime_in_seconds: int - :ivar max_output_size: Limits the maximum size of the content in the output. - :vartype max_output_size: int - :ivar retrieval_reasoning_effort: The retrieval reasoning effort configuration. - :vartype retrieval_reasoning_effort: - ~azure.search.documents.knowledgebases.models.KnowledgeRetrievalReasoningEffort + :ivar max_output_size_in_tokens: Limits the maximum size of the content in the output. + :vartype max_output_size_in_tokens: int :ivar include_activity: Indicates retrieval results should include activity information. :vartype include_activity: bool - :ivar output_mode: The output configuration for this retrieval. Known values are: - "extractiveData" and "answerSynthesis". - :vartype output_mode: str or - ~azure.search.documents.knowledgebases.models.KnowledgeRetrievalOutputMode :ivar knowledge_source_params: A list of runtime parameters for the knowledge sources. 
:vartype knowledge_source_params: list[~azure.search.documents.knowledgebases.models.KnowledgeSourceParams] """ - messages: Optional[list["_models.KnowledgeBaseMessage"]] = rest_field( - visibility=["read", "create", "update", "delete", "query"] - ) - """A list of chat message style input.""" intents: Optional[list["_models.KnowledgeRetrievalIntent"]] = rest_field( visibility=["read", "create", "update", "delete", "query"] ) @@ -1054,23 +764,14 @@ class KnowledgeBaseRetrievalRequest(_Model): name="maxRuntimeInSeconds", visibility=["read", "create", "update", "delete", "query"] ) """The maximum runtime in seconds.""" - max_output_size: Optional[int] = rest_field( - name="maxOutputSize", visibility=["read", "create", "update", "delete", "query"] + max_output_size_in_tokens: Optional[int] = rest_field( + name="maxOutputSizeInTokens", visibility=["read", "create", "update", "delete", "query"] ) """Limits the maximum size of the content in the output.""" - retrieval_reasoning_effort: Optional["_models.KnowledgeRetrievalReasoningEffort"] = rest_field( - name="retrievalReasoningEffort", visibility=["read", "create", "update", "delete", "query"] - ) - """The retrieval reasoning effort configuration.""" include_activity: Optional[bool] = rest_field( name="includeActivity", visibility=["read", "create", "update", "delete", "query"] ) """Indicates retrieval results should include activity information.""" - output_mode: Optional[Union[str, "_models.KnowledgeRetrievalOutputMode"]] = rest_field( - name="outputMode", visibility=["read", "create", "update", "delete", "query"] - ) - """The output configuration for this retrieval. 
Known values are: \"extractiveData\" and - \"answerSynthesis\".""" knowledge_source_params: Optional[list["_models.KnowledgeSourceParams"]] = rest_field( name="knowledgeSourceParams", visibility=["read", "create", "update", "delete", "query"] ) @@ -1080,13 +781,10 @@ class KnowledgeBaseRetrievalRequest(_Model): def __init__( self, *, - messages: Optional[list["_models.KnowledgeBaseMessage"]] = None, intents: Optional[list["_models.KnowledgeRetrievalIntent"]] = None, max_runtime_in_seconds: Optional[int] = None, - max_output_size: Optional[int] = None, - retrieval_reasoning_effort: Optional["_models.KnowledgeRetrievalReasoningEffort"] = None, + max_output_size_in_tokens: Optional[int] = None, include_activity: Optional[bool] = None, - output_mode: Optional[Union[str, "_models.KnowledgeRetrievalOutputMode"]] = None, knowledge_source_params: Optional[list["_models.KnowledgeSourceParams"]] = None, ) -> None: ... @@ -1278,18 +976,16 @@ class KnowledgeRetrievalReasoningEffort(_Model): """Base type for reasoning effort. You probably want to use the sub-classes and not this class directly. Known sub-classes are: - KnowledgeRetrievalLowReasoningEffort, KnowledgeRetrievalMediumReasoningEffort, KnowledgeRetrievalMinimalReasoningEffort - :ivar kind: The kind of reasoning effort. Required. Known values are: "minimal", "low", and - "medium". + :ivar kind: The kind of reasoning effort. Required. "minimal" :vartype kind: str or ~azure.search.documents.knowledgebases.models.KnowledgeRetrievalReasoningEffortKind """ __mapping__: dict[str, _Model] = {} kind: str = rest_discriminator(name="kind", visibility=["read", "create", "update", "delete", "query"]) - """The kind of reasoning effort. Required. Known values are: \"minimal\", \"low\", and \"medium\".""" + """The kind of reasoning effort. Required. 
\"minimal\"""" @overload def __init__( @@ -1309,61 +1005,6 @@ def __init__(self, *args: Any, **kwargs: Any) -> None: super().__init__(*args, **kwargs) -class KnowledgeRetrievalLowReasoningEffort(KnowledgeRetrievalReasoningEffort, discriminator="low"): - """Run knowledge retrieval with low reasoning effort. - - :ivar kind: The discriminator value. Required. Use low reasoning during retrieval. - :vartype kind: str or ~azure.search.documents.knowledgebases.models.LOW - """ - - kind: Literal[KnowledgeRetrievalReasoningEffortKind.LOW] = rest_discriminator(name="kind", visibility=["read", "create", "update", "delete", "query"]) # type: ignore - """The discriminator value. Required. Use low reasoning during retrieval.""" - - @overload - def __init__( - self, - ) -> None: ... - - @overload - def __init__(self, mapping: Mapping[str, Any]) -> None: - """ - :param mapping: raw JSON to initialize the model. - :type mapping: Mapping[str, Any] - """ - - def __init__(self, *args: Any, **kwargs: Any) -> None: - super().__init__(*args, **kwargs) - self.kind = KnowledgeRetrievalReasoningEffortKind.LOW # type: ignore - - -class KnowledgeRetrievalMediumReasoningEffort(KnowledgeRetrievalReasoningEffort, discriminator="medium"): - """Run knowledge retrieval with medium reasoning effort. - - :ivar kind: The discriminator value. Required. Use a moderate amount of reasoning during - retrieval. - :vartype kind: str or ~azure.search.documents.knowledgebases.models.MEDIUM - """ - - kind: Literal[KnowledgeRetrievalReasoningEffortKind.MEDIUM] = rest_discriminator(name="kind", visibility=["read", "create", "update", "delete", "query"]) # type: ignore - """The discriminator value. Required. Use a moderate amount of reasoning during retrieval.""" - - @overload - def __init__( - self, - ) -> None: ... - - @overload - def __init__(self, mapping: Mapping[str, Any]) -> None: - """ - :param mapping: raw JSON to initialize the model. 
- :type mapping: Mapping[str, Any] - """ - - def __init__(self, *args: Any, **kwargs: Any) -> None: - super().__init__(*args, **kwargs) - self.kind = KnowledgeRetrievalReasoningEffortKind.MEDIUM # type: ignore - - class KnowledgeRetrievalMinimalReasoningEffort(KnowledgeRetrievalReasoningEffort, discriminator="minimal"): """Run knowledge retrieval with minimal reasoning effort. @@ -1640,6 +1281,9 @@ def __init__(self, *args: Any, **kwargs: Any) -> None: class KnowledgeSourceStatus(_Model): """Represents the status and synchronization history of a knowledge source. + :ivar kind: Identifies the Knowledge Source kind directly from the Status response. Known + values are: "searchIndex", "azureBlob", "indexedOneLake", and "web". + :vartype kind: str or ~azure.search.documents.indexes.models.KnowledgeSourceKind :ivar synchronization_status: The current synchronization status. Required. Known values are: "creating", "active", and "deleting". :vartype synchronization_status: str or @@ -1660,6 +1304,11 @@ class KnowledgeSourceStatus(_Model): :vartype statistics: ~azure.search.documents.knowledgebases.models.KnowledgeSourceStatistics """ + kind: Optional[Union[str, "_indexes_models3.KnowledgeSourceKind"]] = rest_field( + visibility=["read", "create", "update", "delete", "query"] + ) + """Identifies the Knowledge Source kind directly from the Status response. 
Known values are: + \"searchIndex\", \"azureBlob\", \"indexedOneLake\", and \"web\".""" synchronization_status: Union[str, "_indexes_models3.KnowledgeSourceSynchronizationStatus"] = rest_field( name="synchronizationStatus", visibility=["read", "create", "update", "delete", "query"] ) @@ -1687,6 +1336,7 @@ def __init__( self, *, synchronization_status: Union[str, "_indexes_models3.KnowledgeSourceSynchronizationStatus"], + kind: Optional[Union[str, "_indexes_models3.KnowledgeSourceKind"]] = None, synchronization_interval: Optional[str] = None, current_synchronization_state: Optional["_models.SynchronizationState"] = None, last_synchronization_state: Optional["_models.CompletedSynchronizationState"] = None, @@ -1704,51 +1354,52 @@ def __init__(self, *args: Any, **kwargs: Any) -> None: super().__init__(*args, **kwargs) -class RemoteSharePointKnowledgeSourceParams(KnowledgeSourceParams, discriminator="remoteSharePoint"): - """Specifies runtime parameters for a remote SharePoint knowledge source. - - :ivar knowledge_source_name: The name of the index the params apply to. Required. - :vartype knowledge_source_name: str - :ivar include_references: Indicates whether references should be included for data retrieved - from this source. - :vartype include_references: bool - :ivar include_reference_source_data: Indicates whether references should include the structured - data obtained during retrieval in their payload. - :vartype include_reference_source_data: bool - :ivar always_query_source: Indicates that this knowledge source should bypass source selection - and always be queried at retrieval time. - :vartype always_query_source: bool - :ivar reranker_threshold: The reranker threshold all retrieved documents must meet to be - included in the response. - :vartype reranker_threshold: float - :ivar kind: The discriminator value. Required. A knowledge source that reads data from remote - SharePoint. 
- :vartype kind: str or ~azure.search.documents.indexes.models.REMOTE_SHARE_POINT - :ivar filter_expression_add_on: A filter condition applied to the SharePoint data source. It - must be specified in the Keyword Query Language syntax. It will be combined as a conjunction - with the filter expression specified in the knowledge source definition. - :vartype filter_expression_add_on: str +class KnowledgeSourceSynchronizationError(_Model): + """Represents a document-level indexing error encountered during a knowledge source + synchronization run. + + :ivar doc_id: The unique identifier for the failed document or item within the synchronization + run. + :vartype doc_id: str + :ivar status_code: HTTP-like status code representing the failure category (e.g., 400). + :vartype status_code: int + :ivar name: Name of the ingestion or processing component reporting the error. + :vartype name: str + :ivar error_message: Human-readable, customer-visible error message. Required. + :vartype error_message: str + :ivar details: Additional contextual information about the failure. + :vartype details: str + :ivar documentation_link: A link to relevant troubleshooting documentation. + :vartype documentation_link: str """ - kind: Literal[KnowledgeSourceKind.REMOTE_SHARE_POINT] = rest_discriminator(name="kind", visibility=["read", "create", "update", "delete", "query"]) # type: ignore - """The discriminator value. Required. 
A knowledge source that reads data from remote SharePoint.""" - filter_expression_add_on: Optional[str] = rest_field( - name="filterExpressionAddOn", visibility=["read", "create", "update", "delete", "query"] + doc_id: Optional[str] = rest_field(name="docId", visibility=["read", "create", "update", "delete", "query"]) + """The unique identifier for the failed document or item within the synchronization run.""" + status_code: Optional[int] = rest_field( + name="statusCode", visibility=["read", "create", "update", "delete", "query"] + ) + """HTTP-like status code representing the failure category (e.g., 400).""" + name: Optional[str] = rest_field(visibility=["read", "create", "update", "delete", "query"]) + """Name of the ingestion or processing component reporting the error.""" + error_message: str = rest_field(name="errorMessage", visibility=["read", "create", "update", "delete", "query"]) + """Human-readable, customer-visible error message. Required.""" + details: Optional[str] = rest_field(visibility=["read", "create", "update", "delete", "query"]) + """Additional contextual information about the failure.""" + documentation_link: Optional[str] = rest_field( + name="documentationLink", visibility=["read", "create", "update", "delete", "query"] ) - """A filter condition applied to the SharePoint data source. It must be specified in the Keyword - Query Language syntax. 
It will be combined as a conjunction with the filter expression - specified in the knowledge source definition.""" + """A link to relevant troubleshooting documentation.""" @overload def __init__( self, *, - knowledge_source_name: str, - include_references: Optional[bool] = None, - include_reference_source_data: Optional[bool] = None, - always_query_source: Optional[bool] = None, - reranker_threshold: Optional[float] = None, - filter_expression_add_on: Optional[str] = None, + error_message: str, + doc_id: Optional[str] = None, + status_code: Optional[int] = None, + name: Optional[str] = None, + details: Optional[str] = None, + documentation_link: Optional[str] = None, ) -> None: ... @overload @@ -1760,7 +1411,6 @@ def __init__(self, mapping: Mapping[str, Any]) -> None: def __init__(self, *args: Any, **kwargs: Any) -> None: super().__init__(*args, **kwargs) - self.kind = KnowledgeSourceKind.REMOTE_SHARE_POINT # type: ignore class SearchIndexKnowledgeSourceParams(KnowledgeSourceParams, discriminator="searchIndex"): @@ -1774,9 +1424,6 @@ class SearchIndexKnowledgeSourceParams(KnowledgeSourceParams, discriminator="sea :ivar include_reference_source_data: Indicates whether references should include the structured data obtained during retrieval in their payload. :vartype include_reference_source_data: bool - :ivar always_query_source: Indicates that this knowledge source should bypass source selection - and always be queried at retrieval time. - :vartype always_query_source: bool :ivar reranker_threshold: The reranker threshold all retrieved documents must meet to be included in the response. :vartype reranker_threshold: float @@ -1801,7 +1448,6 @@ def __init__( knowledge_source_name: str, include_references: Optional[bool] = None, include_reference_source_data: Optional[bool] = None, - always_query_source: Optional[bool] = None, reranker_threshold: Optional[float] = None, filter_add_on: Optional[str] = None, ) -> None: ... 
@@ -1818,65 +1464,6 @@ def __init__(self, *args: Any, **kwargs: Any) -> None: self.kind = KnowledgeSourceKind.SEARCH_INDEX # type: ignore -class SharePointSensitivityLabelInfo(_Model): - """Information about the sensitivity label applied to a SharePoint document. - - :ivar display_name: The display name for the sensitivity label. - :vartype display_name: str - :ivar sensitivity_label_id: The ID of the sensitivity label. - :vartype sensitivity_label_id: str - :ivar tooltip: The tooltip that should be displayed for the label in a UI. - :vartype tooltip: str - :ivar priority: The priority in which the sensitivity label is applied. - :vartype priority: int - :ivar color: The color that the UI should display for the label, if configured. - :vartype color: str - :ivar is_encrypted: Indicates whether the sensitivity label enforces encryption. - :vartype is_encrypted: bool - """ - - display_name: Optional[str] = rest_field( - name="displayName", visibility=["read", "create", "update", "delete", "query"] - ) - """The display name for the sensitivity label.""" - sensitivity_label_id: Optional[str] = rest_field( - name="sensitivityLabelId", visibility=["read", "create", "update", "delete", "query"] - ) - """The ID of the sensitivity label.""" - tooltip: Optional[str] = rest_field(visibility=["read", "create", "update", "delete", "query"]) - """The tooltip that should be displayed for the label in a UI.""" - priority: Optional[int] = rest_field(visibility=["read", "create", "update", "delete", "query"]) - """The priority in which the sensitivity label is applied.""" - color: Optional[str] = rest_field(visibility=["read", "create", "update", "delete", "query"]) - """The color that the UI should display for the label, if configured.""" - is_encrypted: Optional[bool] = rest_field( - name="isEncrypted", visibility=["read", "create", "update", "delete", "query"] - ) - """Indicates whether the sensitivity label enforces encryption.""" - - @overload - def __init__( - self, - *, - 
display_name: Optional[str] = None, - sensitivity_label_id: Optional[str] = None, - tooltip: Optional[str] = None, - priority: Optional[int] = None, - color: Optional[str] = None, - is_encrypted: Optional[bool] = None, - ) -> None: ... - - @overload - def __init__(self, mapping: Mapping[str, Any]) -> None: - """ - :param mapping: raw JSON to initialize the model. - :type mapping: Mapping[str, Any] - """ - - def __init__(self, *args: Any, **kwargs: Any) -> None: - super().__init__(*args, **kwargs) - - class SynchronizationState(_Model): """Represents the current state of an ongoing synchronization that spans multiple indexer runs. @@ -1890,6 +1477,10 @@ class SynchronizationState(_Model): :vartype items_updates_failed: int :ivar items_skipped: The number of items skipped in the current synchronization. Required. :vartype items_skipped: int + :ivar errors: Collection of document-level indexing errors encountered during the current + synchronization run. Returned only when errors are present. + :vartype errors: + list[~azure.search.documents.knowledgebases.models.KnowledgeSourceSynchronizationError] """ start_time: datetime.datetime = rest_field( @@ -1906,6 +1497,11 @@ class SynchronizationState(_Model): """The number of item updates that failed in the current synchronization. Required.""" items_skipped: int = rest_field(name="itemsSkipped", visibility=["read", "create", "update", "delete", "query"]) """The number of items skipped in the current synchronization. Required.""" + errors: Optional[list["_models.KnowledgeSourceSynchronizationError"]] = rest_field( + visibility=["read", "create", "update", "delete", "query"] + ) + """Collection of document-level indexing errors encountered during the current synchronization + run. 
Returned only when errors are present.""" @overload def __init__( @@ -1915,6 +1511,7 @@ def __init__( items_updates_processed: int, items_updates_failed: int, items_skipped: int, + errors: Optional[list["_models.KnowledgeSourceSynchronizationError"]] = None, ) -> None: ... @overload @@ -1939,9 +1536,6 @@ class WebKnowledgeSourceParams(KnowledgeSourceParams, discriminator="web"): :ivar include_reference_source_data: Indicates whether references should include the structured data obtained during retrieval in their payload. :vartype include_reference_source_data: bool - :ivar always_query_source: Indicates that this knowledge source should bypass source selection - and always be queried at retrieval time. - :vartype always_query_source: bool :ivar reranker_threshold: The reranker threshold all retrieved documents must meet to be included in the response. :vartype reranker_threshold: float @@ -1975,7 +1569,6 @@ def __init__( knowledge_source_name: str, include_references: Optional[bool] = None, include_reference_source_data: Optional[bool] = None, - always_query_source: Optional[bool] = None, reranker_threshold: Optional[float] = None, language: Optional[str] = None, market: Optional[str] = None, diff --git a/sdk/search/azure-search-documents/azure/search/documents/models/__init__.py b/sdk/search/azure-search-documents/azure/search/documents/models/__init__.py index 78fd13d0ebcd..f114daf84f72 100644 --- a/sdk/search/azure-search-documents/azure/search/documents/models/__init__.py +++ b/sdk/search/azure-search-documents/azure/search/documents/models/__init__.py @@ -21,30 +21,20 @@ ErrorDetail, ErrorResponse, FacetResult, - HybridSearch, IndexAction, IndexDocumentsBatch, IndexingResult, LookupDocument, QueryAnswerResult, QueryCaptionResult, - QueryResultDocumentInnerHit, - QueryResultDocumentRerankerInput, - QueryResultDocumentSemanticField, QueryResultDocumentSubscores, - QueryRewritesDebugInfo, - QueryRewritesValuesDebugInfo, SearchDocumentsResult, SearchRequest, 
SearchResult, - SearchScoreThreshold, - SemanticDebugInfo, SingleVectorFieldResult, SuggestResult, TextResult, VectorQuery, - VectorSimilarityThreshold, - VectorThreshold, VectorizableImageBinaryQuery, VectorizableImageUrlQuery, VectorizableTextQuery, @@ -53,26 +43,18 @@ ) from ._enums import ( # type: ignore - AutocompleteMode, - HybridCountAndFacetMode, IndexActionType, QueryAnswerType, QueryCaptionType, QueryDebugMode, - QueryLanguage, - QueryRewritesType, - QuerySpellerType, QueryType, ScoringStatistics, SearchMode, SemanticErrorMode, SemanticErrorReason, - SemanticFieldState, - SemanticQueryRewritesResultType, SemanticSearchResultsType, VectorFilterMode, VectorQueryKind, - VectorThresholdKind, ) from ._patch import __all__ as _patch_all from ._patch import * @@ -86,55 +68,37 @@ "ErrorDetail", "ErrorResponse", "FacetResult", - "HybridSearch", "IndexAction", "IndexDocumentsBatch", "IndexingResult", "LookupDocument", "QueryAnswerResult", "QueryCaptionResult", - "QueryResultDocumentInnerHit", - "QueryResultDocumentRerankerInput", - "QueryResultDocumentSemanticField", "QueryResultDocumentSubscores", - "QueryRewritesDebugInfo", - "QueryRewritesValuesDebugInfo", "SearchDocumentsResult", "SearchRequest", "SearchResult", - "SearchScoreThreshold", - "SemanticDebugInfo", "SingleVectorFieldResult", "SuggestResult", "TextResult", "VectorQuery", - "VectorSimilarityThreshold", - "VectorThreshold", "VectorizableImageBinaryQuery", "VectorizableImageUrlQuery", "VectorizableTextQuery", "VectorizedQuery", "VectorsDebugInfo", - "AutocompleteMode", - "HybridCountAndFacetMode", "IndexActionType", "QueryAnswerType", "QueryCaptionType", "QueryDebugMode", - "QueryLanguage", - "QueryRewritesType", - "QuerySpellerType", "QueryType", "ScoringStatistics", "SearchMode", "SemanticErrorMode", "SemanticErrorReason", - "SemanticFieldState", - "SemanticQueryRewritesResultType", "SemanticSearchResultsType", "VectorFilterMode", "VectorQueryKind", - "VectorThresholdKind", ] __all__.extend([p for p 
in _patch_all if p not in __all__]) # pyright: ignore _patch_sdk() diff --git a/sdk/search/azure-search-documents/azure/search/documents/models/_enums.py b/sdk/search/azure-search-documents/azure/search/documents/models/_enums.py index 8d0896878044..617894eb5ecb 100644 --- a/sdk/search/azure-search-documents/azure/search/documents/models/_enums.py +++ b/sdk/search/azure-search-documents/azure/search/documents/models/_enums.py @@ -28,21 +28,6 @@ class AutocompleteMode(str, Enum, metaclass=CaseInsensitiveEnumMeta): terms could include 'washington medicaid' and 'washington medical'.""" -class HybridCountAndFacetMode(str, Enum, metaclass=CaseInsensitiveEnumMeta): - """Determines whether the count and facets should includes all documents that matched the search - query, or only the documents that are retrieved within the 'maxTextRecallSize' window. The - default value is 'countAllResults'. - """ - - COUNT_RETRIEVABLE_RESULTS = "countRetrievableResults" - """Only include documents that were matched within the 'maxTextRecallSize' retrieval window when - computing 'count' and 'facets'.""" - COUNT_ALL_RESULTS = "countAllResults" - """Include all documents that were matched by the search query when computing 'count' and - 'facets', regardless of whether or not those documents are within the 'maxTextRecallSize' - retrieval window.""" - - class IndexActionType(str, Enum, metaclass=CaseInsensitiveEnumMeta): """The operation to perform on a document in an indexing batch.""" @@ -120,179 +105,6 @@ class QueryDebugMode(str, Enum, metaclass=CaseInsensitiveEnumMeta): """Turn on all debug options.""" -class QueryLanguage(str, Enum, metaclass=CaseInsensitiveEnumMeta): - """The language of the query.""" - - NONE = "none" - """Query language not specified.""" - EN_US = "en-us" - """Query language value for English (United States).""" - EN_GB = "en-gb" - """Query language value for English (Great Britain).""" - EN_IN = "en-in" - """Query language value for English (India).""" - EN_CA = 
"en-ca" - """Query language value for English (Canada).""" - EN_AU = "en-au" - """Query language value for English (Australia).""" - FR_FR = "fr-fr" - """Query language value for French (France).""" - FR_CA = "fr-ca" - """Query language value for French (Canada).""" - DE_DE = "de-de" - """Query language value for German (Germany).""" - ES_ES = "es-es" - """Query language value for Spanish (Spain).""" - ES_MX = "es-mx" - """Query language value for Spanish (Mexico).""" - ZH_CN = "zh-cn" - """Query language value for Chinese (China).""" - ZH_TW = "zh-tw" - """Query language value for Chinese (Taiwan).""" - PT_BR = "pt-br" - """Query language value for Portuguese (Brazil).""" - PT_PT = "pt-pt" - """Query language value for Portuguese (Portugal).""" - IT_IT = "it-it" - """Query language value for Italian (Italy).""" - JA_JP = "ja-jp" - """Query language value for Japanese (Japan).""" - KO_KR = "ko-kr" - """Query language value for Korean (Korea).""" - RU_RU = "ru-ru" - """Query language value for Russian (Russia).""" - CS_CZ = "cs-cz" - """Query language value for Czech (Czech Republic).""" - NL_BE = "nl-be" - """Query language value for Dutch (Belgium).""" - NL_NL = "nl-nl" - """Query language value for Dutch (Netherlands).""" - HU_HU = "hu-hu" - """Query language value for Hungarian (Hungary).""" - PL_PL = "pl-pl" - """Query language value for Polish (Poland).""" - SV_SE = "sv-se" - """Query language value for Swedish (Sweden).""" - TR_TR = "tr-tr" - """Query language value for Turkish (Turkey).""" - HI_IN = "hi-in" - """Query language value for Hindi (India).""" - AR_SA = "ar-sa" - """Query language value for Arabic (Saudi Arabia).""" - AR_EG = "ar-eg" - """Query language value for Arabic (Egypt).""" - AR_MA = "ar-ma" - """Query language value for Arabic (Morocco).""" - AR_KW = "ar-kw" - """Query language value for Arabic (Kuwait).""" - AR_JO = "ar-jo" - """Query language value for Arabic (Jordan).""" - DA_DK = "da-dk" - """Query language value for Danish 
(Denmark).""" - NO_NO = "no-no" - """Query language value for Norwegian (Norway).""" - BG_BG = "bg-bg" - """Query language value for Bulgarian (Bulgaria).""" - HR_HR = "hr-hr" - """Query language value for Croatian (Croatia).""" - HR_BA = "hr-ba" - """Query language value for Croatian (Bosnia and Herzegovina).""" - MS_MY = "ms-my" - """Query language value for Malay (Malaysia).""" - MS_BN = "ms-bn" - """Query language value for Malay (Brunei Darussalam).""" - SL_SL = "sl-sl" - """Query language value for Slovenian (Slovenia).""" - TA_IN = "ta-in" - """Query language value for Tamil (India).""" - VI_VN = "vi-vn" - """Query language value for Vietnamese (Viet Nam).""" - EL_GR = "el-gr" - """Query language value for Greek (Greece).""" - RO_RO = "ro-ro" - """Query language value for Romanian (Romania).""" - IS_IS = "is-is" - """Query language value for Icelandic (Iceland).""" - ID_ID = "id-id" - """Query language value for Indonesian (Indonesia).""" - TH_TH = "th-th" - """Query language value for Thai (Thailand).""" - LT_LT = "lt-lt" - """Query language value for Lithuanian (Lithuania).""" - UK_UA = "uk-ua" - """Query language value for Ukrainian (Ukraine).""" - LV_LV = "lv-lv" - """Query language value for Latvian (Latvia).""" - ET_EE = "et-ee" - """Query language value for Estonian (Estonia).""" - CA_ES = "ca-es" - """Query language value for Catalan.""" - FI_FI = "fi-fi" - """Query language value for Finnish (Finland).""" - SR_BA = "sr-ba" - """Query language value for Serbian (Bosnia and Herzegovina).""" - SR_ME = "sr-me" - """Query language value for Serbian (Montenegro).""" - SR_RS = "sr-rs" - """Query language value for Serbian (Serbia).""" - SK_SK = "sk-sk" - """Query language value for Slovak (Slovakia).""" - NB_NO = "nb-no" - """Query language value for Norwegian (Norway).""" - HY_AM = "hy-am" - """Query language value for Armenian (Armenia).""" - BN_IN = "bn-in" - """Query language value for Bengali (India).""" - EU_ES = "eu-es" - """Query language value for 
Basque.""" - GL_ES = "gl-es" - """Query language value for Galician.""" - GU_IN = "gu-in" - """Query language value for Gujarati (India).""" - HE_IL = "he-il" - """Query language value for Hebrew (Israel).""" - GA_IE = "ga-ie" - """Query language value for Irish (Ireland).""" - KN_IN = "kn-in" - """Query language value for Kannada (India).""" - ML_IN = "ml-in" - """Query language value for Malayalam (India).""" - MR_IN = "mr-in" - """Query language value for Marathi (India).""" - FA_AE = "fa-ae" - """Query language value for Persian (U.A.E.).""" - PA_IN = "pa-in" - """Query language value for Punjabi (India).""" - TE_IN = "te-in" - """Query language value for Telugu (India).""" - UR_PK = "ur-pk" - """Query language value for Urdu (Pakistan).""" - - -class QueryRewritesType(str, Enum, metaclass=CaseInsensitiveEnumMeta): - """This parameter is only valid if the query type is ``semantic``. When QueryRewrites is set to - ``generative``, the query terms are sent to a generate model which will produce 10 (default) - rewrites to help increase the recall of the request. The requested count can be configured by - appending the pipe character ``|`` followed by the ``count-`` option, such - as ``generative|count-3``. Defaults to ``None``. - """ - - NONE = "none" - """Do not generate additional query rewrites for this query.""" - GENERATIVE = "generative" - """Generate alternative query terms to increase the recall of a search request.""" - - -class QuerySpellerType(str, Enum, metaclass=CaseInsensitiveEnumMeta): - """Improve search recall by spell-correcting individual search query terms.""" - - NONE = "none" - """Speller not enabled.""" - LEXICON = "lexicon" - """Speller corrects individual query terms using a static lexicon for the language specified by - the queryLanguage parameter.""" - - class QueryType(str, Enum, metaclass=CaseInsensitiveEnumMeta): """Specifies the syntax of the search query. The default is 'simple'. 
Use 'full' if your query uses the Lucene query syntax and 'semantic' if query syntax is not needed. @@ -361,25 +173,6 @@ class SemanticErrorReason(str, Enum, metaclass=CaseInsensitiveEnumMeta): """At least one step of the semantic process failed.""" -class SemanticFieldState(str, Enum, metaclass=CaseInsensitiveEnumMeta): - """The way the field was used for the semantic enrichment process.""" - - USED = "used" - """The field was fully used for semantic enrichment.""" - UNUSED = "unused" - """The field was not used for semantic enrichment.""" - PARTIAL = "partial" - """The field was partially used for semantic enrichment.""" - - -class SemanticQueryRewritesResultType(str, Enum, metaclass=CaseInsensitiveEnumMeta): - """Type of query rewrite that was used for this request.""" - - ORIGINAL_QUERY_ONLY = "originalQueryOnly" - """Query rewrites were not successfully generated for this request. Only the original query was - used to retrieve the results.""" - - class SemanticSearchResultsType(str, Enum, metaclass=CaseInsensitiveEnumMeta): """Type of partial response that was returned for a semantic ranking request.""" @@ -416,17 +209,3 @@ class VectorQueryKind(str, Enum, metaclass=CaseInsensitiveEnumMeta): IMAGE_BINARY = "imageBinary" """Vector query where a base 64 encoded binary of an image that needs to be vectorized is provided.""" - - -class VectorThresholdKind(str, Enum, metaclass=CaseInsensitiveEnumMeta): - """The kind of threshold used to filter vector queries.""" - - VECTOR_SIMILARITY = "vectorSimilarity" - """The results of the vector query will be filtered based on the vector similarity metric. Note - this is the canonical definition of similarity metric, not the 'distance' version. The - threshold direction (larger or smaller) will be chosen automatically according to the metric - used by the field.""" - SEARCH_SCORE = "searchScore" - """The results of the vector query will filter based on the '@search.score' value. 
Note this is - the @search.score returned as part of the search response. The threshold direction will be - chosen for higher @search.score.""" diff --git a/sdk/search/azure-search-documents/azure/search/documents/models/_models.py b/sdk/search/azure-search-documents/azure/search/documents/models/_models.py index aea242bf9ed1..7ff6a0acf51d 100644 --- a/sdk/search/azure-search-documents/azure/search/documents/models/_models.py +++ b/sdk/search/azure-search-documents/azure/search/documents/models/_models.py @@ -11,7 +11,7 @@ from typing import Any, Literal, Mapping, Optional, TYPE_CHECKING, Union, overload from .._utils.model_base import Model as _Model, rest_discriminator, rest_field -from ._enums import VectorQueryKind, VectorThresholdKind +from ._enums import VectorQueryKind if TYPE_CHECKING: from .. import models as _models @@ -50,38 +50,18 @@ class AutocompleteResult(_Model): class DebugInfo(_Model): - """Contains debugging information that can be used to further explore your search results. - - :ivar query_rewrites: Contains debugging information specific to query rewrites. - :vartype query_rewrites: ~azure.search.documents.models.QueryRewritesDebugInfo - """ - - query_rewrites: Optional["_models.QueryRewritesDebugInfo"] = rest_field(name="queryRewrites", visibility=["read"]) - """Contains debugging information specific to query rewrites.""" + """Contains debugging information that can be used to further explore your search results.""" class DocumentDebugInfo(_Model): """Contains debugging information that can be used to further explore your search results. - :ivar semantic: Contains debugging information specific to semantic ranking requests. - :vartype semantic: ~azure.search.documents.models.SemanticDebugInfo :ivar vectors: Contains debugging information specific to vector and hybrid search. 
:vartype vectors: ~azure.search.documents.models.VectorsDebugInfo - :ivar inner_hits: Contains debugging information specific to vectors matched within a - collection of complex types. - :vartype inner_hits: dict[str, - list[~azure.search.documents.models.QueryResultDocumentInnerHit]] """ - semantic: Optional["_models.SemanticDebugInfo"] = rest_field(visibility=["read"]) - """Contains debugging information specific to semantic ranking requests.""" vectors: Optional["_models.VectorsDebugInfo"] = rest_field(visibility=["read"]) """Contains debugging information specific to vector and hybrid search.""" - inner_hits: Optional[dict[str, list["_models.QueryResultDocumentInnerHit"]]] = rest_field( - name="innerHits", visibility=["read"] - ) - """Contains debugging information specific to vectors matched within a collection of complex - types.""" class ErrorAdditionalInfo(_Model): @@ -164,89 +144,10 @@ class FacetResult(_Model): :ivar count: The approximate count of documents falling within the bucket described by this facet. :vartype count: int - :ivar avg: The resulting total avg for the facet when a avg metric is requested. - :vartype avg: float - :ivar min: The resulting total min for the facet when a min metric is requested. - :vartype min: float - :ivar max: The resulting total max for the facet when a max metric is requested. - :vartype max: float - :ivar sum: The resulting total sum for the facet when a sum metric is requested. - :vartype sum: float - :ivar cardinality: The resulting total cardinality for the facet when a cardinality metric is - requested. - :vartype cardinality: int - :ivar facets: The nested facet query results for the search operation, organized as a - collection of buckets for each faceted field; null if the query did not contain any nested - facets. 
- :vartype facets: dict[str, list[~azure.search.documents.models.FacetResult]] """ count: Optional[int] = rest_field(visibility=["read"]) """The approximate count of documents falling within the bucket described by this facet.""" - avg: Optional[float] = rest_field(visibility=["read"]) - """The resulting total avg for the facet when a avg metric is requested.""" - min: Optional[float] = rest_field(visibility=["read"]) - """The resulting total min for the facet when a min metric is requested.""" - max: Optional[float] = rest_field(visibility=["read"]) - """The resulting total max for the facet when a max metric is requested.""" - sum: Optional[float] = rest_field(visibility=["read"]) - """The resulting total sum for the facet when a sum metric is requested.""" - cardinality: Optional[int] = rest_field(visibility=["read"]) - """The resulting total cardinality for the facet when a cardinality metric is requested.""" - facets: Optional[dict[str, list["_models.FacetResult"]]] = rest_field(name="@search.facets", visibility=["read"]) - """The nested facet query results for the search operation, organized as a collection of buckets - for each faceted field; null if the query did not contain any nested facets.""" - - -class HybridSearch(_Model): - """TThe query parameters to configure hybrid search behaviors. - - :ivar max_text_recall_size: Determines the maximum number of documents to be retrieved by the - text query portion of a hybrid search request. Those documents will be combined with the - documents matching the vector queries to produce a single final list of results. Choosing a - larger maxTextRecallSize value will allow retrieving and paging through more documents (using - the top and skip parameters), at the cost of higher resource utilization and higher latency. - The value needs to be between 1 and 10,000. Default is 1000. 
- :vartype max_text_recall_size: int - :ivar count_and_facet_mode: Determines whether the count and facets should includes all - documents that matched the search query, or only the documents that are retrieved within the - 'maxTextRecallSize' window. Known values are: "countRetrievableResults" and "countAllResults". - :vartype count_and_facet_mode: str or ~azure.search.documents.models.HybridCountAndFacetMode - """ - - max_text_recall_size: Optional[int] = rest_field( - name="maxTextRecallSize", visibility=["read", "create", "update", "delete", "query"] - ) - """Determines the maximum number of documents to be retrieved by the text query portion of a - hybrid search request. Those documents will be combined with the documents matching the vector - queries to produce a single final list of results. Choosing a larger maxTextRecallSize value - will allow retrieving and paging through more documents (using the top and skip parameters), at - the cost of higher resource utilization and higher latency. The value needs to be between 1 and - 10,000. Default is 1000.""" - count_and_facet_mode: Optional[Union[str, "_models.HybridCountAndFacetMode"]] = rest_field( - name="countAndFacetMode", visibility=["read", "create", "update", "delete", "query"] - ) - """Determines whether the count and facets should includes all documents that matched the search - query, or only the documents that are retrieved within the 'maxTextRecallSize' window. Known - values are: \"countRetrievableResults\" and \"countAllResults\".""" - - @overload - def __init__( - self, - *, - max_text_recall_size: Optional[int] = None, - count_and_facet_mode: Optional[Union[str, "_models.HybridCountAndFacetMode"]] = None, - ) -> None: ... - - @overload - def __init__(self, mapping: Mapping[str, Any]) -> None: - """ - :param mapping: raw JSON to initialize the model. 
- :type mapping: Mapping[str, Any] - """ - - def __init__(self, *args: Any, **kwargs: Any) -> None: - super().__init__(*args, **kwargs) class IndexAction(_Model): @@ -408,64 +309,6 @@ class QueryCaptionResult(_Model): """Same text passage as in the Text property with highlighted phrases most relevant to the query.""" -class QueryResultDocumentInnerHit(_Model): - """Detailed scoring information for an individual element of a complex collection. - - :ivar ordinal: Position of this specific matching element within it's original collection. - Position starts at 0. - :vartype ordinal: int - :ivar vectors: Detailed scoring information for an individual element of a complex collection - that matched a vector query. - :vartype vectors: list[dict[str, ~azure.search.documents.models.SingleVectorFieldResult]] - """ - - ordinal: Optional[int] = rest_field(visibility=["read"]) - """Position of this specific matching element within it's original collection. Position starts at - 0.""" - vectors: Optional[list[dict[str, "_models.SingleVectorFieldResult"]]] = rest_field(visibility=["read"]) - """Detailed scoring information for an individual element of a complex collection that matched a - vector query.""" - - -class QueryResultDocumentRerankerInput(_Model): - """The raw concatenated strings that were sent to the semantic enrichment process. - - :ivar title: The raw string for the title field that was used for semantic enrichment. - :vartype title: str - :ivar content: The raw concatenated strings for the content fields that were used for semantic - enrichment. - :vartype content: str - :ivar keywords: The raw concatenated strings for the keyword fields that were used for semantic - enrichment. 
- :vartype keywords: str - """ - - title: Optional[str] = rest_field(visibility=["read"]) - """The raw string for the title field that was used for semantic enrichment.""" - content: Optional[str] = rest_field(visibility=["read"]) - """The raw concatenated strings for the content fields that were used for semantic enrichment.""" - keywords: Optional[str] = rest_field(visibility=["read"]) - """The raw concatenated strings for the keyword fields that were used for semantic enrichment.""" - - -class QueryResultDocumentSemanticField(_Model): - """Description of fields that were sent to the semantic enrichment process, as well as how they - were used. - - :ivar name: The name of the field that was sent to the semantic enrichment process. - :vartype name: str - :ivar state: The way the field was used for the semantic enrichment process (fully used, - partially used, or unused). Known values are: "used", "unused", and "partial". - :vartype state: str or ~azure.search.documents.models.SemanticFieldState - """ - - name: Optional[str] = rest_field(visibility=["read"]) - """The name of the field that was sent to the semantic enrichment process.""" - state: Optional[Union[str, "_models.SemanticFieldState"]] = rest_field(visibility=["read"]) - """The way the field was used for the semantic enrichment process (fully used, partially used, or - unused). Known values are: \"used\", \"unused\", and \"partial\".""" - - class QueryResultDocumentSubscores(_Model): """The breakdown of subscores between the text and vector query components of the search query for this document. Each vector query is shown as a separate object in the same order they were @@ -487,38 +330,6 @@ class QueryResultDocumentSubscores(_Model): """The BM25 or Classic score for the text portion of the query.""" -class QueryRewritesDebugInfo(_Model): - """Contains debugging information specific to query rewrites. - - :ivar text: List of query rewrites generated for the text query. 
- :vartype text: ~azure.search.documents.models.QueryRewritesValuesDebugInfo - :ivar vectors: List of query rewrites generated for the vectorizable text queries. - :vartype vectors: list[~azure.search.documents.models.QueryRewritesValuesDebugInfo] - """ - - text: Optional["_models.QueryRewritesValuesDebugInfo"] = rest_field(visibility=["read"]) - """List of query rewrites generated for the text query.""" - vectors: Optional[list["_models.QueryRewritesValuesDebugInfo"]] = rest_field(visibility=["read"]) - """List of query rewrites generated for the vectorizable text queries.""" - - -class QueryRewritesValuesDebugInfo(_Model): - """Contains debugging information specific to query rewrites. - - :ivar input_query: The input text to the generative query rewriting model. There may be cases - where the user query and the input to the generative model are not identical. - :vartype input_query: str - :ivar rewrites: List of query rewrites. - :vartype rewrites: list[str] - """ - - input_query: Optional[str] = rest_field(name="inputQuery", visibility=["read"]) - """The input text to the generative query rewriting model. There may be cases where the user query - and the input to the generative model are not identical.""" - rewrites: Optional[list[str]] = rest_field(visibility=["read"]) - """List of query rewrites.""" - - class SearchDocumentsResult(_Model): """Response containing search results from an index. @@ -536,8 +347,6 @@ class SearchDocumentsResult(_Model): :ivar answers: The answers query results for the search operation; null if the answers query parameter was not specified or set to 'none'. :vartype answers: list[~azure.search.documents.models.QueryAnswerResult] - :ivar debug_info: Debug information that applies to the search results as a whole. - :vartype debug_info: ~azure.search.documents.models.DebugInfo :ivar next_page_parameters: Continuation JSON payload returned when the query can't return all the requested results in a single response. 
You can use this JSON along with. :vartype next_page_parameters: ~azure.search.documents.models.SearchRequest @@ -557,10 +366,6 @@ class SearchDocumentsResult(_Model): ranking request. Known values are: "baseResults" and "rerankedResults". :vartype semantic_partial_response_type: str or ~azure.search.documents.models.SemanticSearchResultsType - :ivar semantic_query_rewrites_result_type: Type of query rewrite that was used to retrieve - documents. "originalQueryOnly" - :vartype semantic_query_rewrites_result_type: str or - ~azure.search.documents.models.SemanticQueryRewritesResultType """ count: Optional[int] = rest_field(name="@odata.count", visibility=["read"]) @@ -577,8 +382,6 @@ class SearchDocumentsResult(_Model): answers: Optional[list["_models.QueryAnswerResult"]] = rest_field(name="@search.answers", visibility=["read"]) """The answers query results for the search operation; null if the answers query parameter was not specified or set to 'none'.""" - debug_info: Optional["_models.DebugInfo"] = rest_field(name="@search.debug", visibility=["read"]) - """Debug information that applies to the search results as a whole.""" next_page_parameters: Optional["_models.SearchRequest"] = rest_field( name="@search.nextPageParameters", visibility=["read"] ) @@ -601,10 +404,6 @@ class SearchDocumentsResult(_Model): ) """Type of partial response that was returned for a semantic ranking request. Known values are: \"baseResults\" and \"rerankedResults\".""" - semantic_query_rewrites_result_type: Optional[Union[str, "_models.SemanticQueryRewritesResultType"]] = rest_field( - name="@search.semanticQueryRewritesResultType", visibility=["read"] - ) - """Type of query rewrite that was used to retrieve documents. \"originalQueryOnly\"""" class SearchRequest(_Model): @@ -680,19 +479,6 @@ class SearchRequest(_Model): :ivar search_mode: A value that specifies whether any or all of the search terms must be matched in order to count the document as a match. 
Known values are: "any" and "all". :vartype search_mode: str or ~azure.search.documents.models.SearchMode - :ivar query_language: A value that specifies the language of the search query. Known values - are: "none", "en-us", "en-gb", "en-in", "en-ca", "en-au", "fr-fr", "fr-ca", "de-de", "es-es", - "es-mx", "zh-cn", "zh-tw", "pt-br", "pt-pt", "it-it", "ja-jp", "ko-kr", "ru-ru", "cs-cz", - "nl-be", "nl-nl", "hu-hu", "pl-pl", "sv-se", "tr-tr", "hi-in", "ar-sa", "ar-eg", "ar-ma", - "ar-kw", "ar-jo", "da-dk", "no-no", "bg-bg", "hr-hr", "hr-ba", "ms-my", "ms-bn", "sl-sl", - "ta-in", "vi-vn", "el-gr", "ro-ro", "is-is", "id-id", "th-th", "lt-lt", "uk-ua", "lv-lv", - "et-ee", "ca-es", "fi-fi", "sr-ba", "sr-me", "sr-rs", "sk-sk", "nb-no", "hy-am", "bn-in", - "eu-es", "gl-es", "gu-in", "he-il", "ga-ie", "kn-in", "ml-in", "mr-in", "fa-ae", "pa-in", - "te-in", and "ur-pk". - :vartype query_language: str or ~azure.search.documents.models.QueryLanguage - :ivar query_speller: A value that specifies the type of the speller to use to spell-correct - individual search query terms. Known values are: "none" and "lexicon". - :vartype query_speller: str or ~azure.search.documents.models.QuerySpellerType :ivar select: The comma-separated list of fields to retrieve. If unspecified, all fields marked as retrievable in the schema are included. :vartype select: list[str] @@ -726,19 +512,12 @@ class SearchRequest(_Model): :ivar captions: A value that specifies whether captions should be returned as part of the search response. Known values are: "none" and "extractive". :vartype captions: str or ~azure.search.documents.models.QueryCaptionType - :ivar query_rewrites: A value that specifies whether query rewrites should be generated to - augment the search query. Known values are: "none" and "generative". - :vartype query_rewrites: str or ~azure.search.documents.models.QueryRewritesType - :ivar semantic_fields: The comma-separated list of field names used for semantic ranking. 
- :vartype semantic_fields: list[str] :ivar vector_queries: The query parameters for vector and hybrid search queries. :vartype vector_queries: list[~azure.search.documents.models.VectorQuery] :ivar vector_filter_mode: Determines whether or not filters are applied before or after the vector search is performed. Default is 'preFilter' for new indexes. Known values are: "postFilter", "preFilter", and "strictPostFilter". :vartype vector_filter_mode: str or ~azure.search.documents.models.VectorFilterMode - :ivar hybrid_search: The query parameters to configure hybrid search behaviors. - :vartype hybrid_search: ~azure.search.documents.models.HybridSearch """ include_total_count: Optional[bool] = rest_field( @@ -833,24 +612,6 @@ class SearchRequest(_Model): ) """A value that specifies whether any or all of the search terms must be matched in order to count the document as a match. Known values are: \"any\" and \"all\".""" - query_language: Optional[Union[str, "_models.QueryLanguage"]] = rest_field( - name="queryLanguage", visibility=["read", "create", "update", "delete", "query"] - ) - """A value that specifies the language of the search query. 
Known values are: \"none\", \"en-us\", - \"en-gb\", \"en-in\", \"en-ca\", \"en-au\", \"fr-fr\", \"fr-ca\", \"de-de\", \"es-es\", - \"es-mx\", \"zh-cn\", \"zh-tw\", \"pt-br\", \"pt-pt\", \"it-it\", \"ja-jp\", \"ko-kr\", - \"ru-ru\", \"cs-cz\", \"nl-be\", \"nl-nl\", \"hu-hu\", \"pl-pl\", \"sv-se\", \"tr-tr\", - \"hi-in\", \"ar-sa\", \"ar-eg\", \"ar-ma\", \"ar-kw\", \"ar-jo\", \"da-dk\", \"no-no\", - \"bg-bg\", \"hr-hr\", \"hr-ba\", \"ms-my\", \"ms-bn\", \"sl-sl\", \"ta-in\", \"vi-vn\", - \"el-gr\", \"ro-ro\", \"is-is\", \"id-id\", \"th-th\", \"lt-lt\", \"uk-ua\", \"lv-lv\", - \"et-ee\", \"ca-es\", \"fi-fi\", \"sr-ba\", \"sr-me\", \"sr-rs\", \"sk-sk\", \"nb-no\", - \"hy-am\", \"bn-in\", \"eu-es\", \"gl-es\", \"gu-in\", \"he-il\", \"ga-ie\", \"kn-in\", - \"ml-in\", \"mr-in\", \"fa-ae\", \"pa-in\", \"te-in\", and \"ur-pk\".""" - query_speller: Optional[Union[str, "_models.QuerySpellerType"]] = rest_field( - name="speller", visibility=["read", "create", "update", "delete", "query"] - ) - """A value that specifies the type of the speller to use to spell-correct individual search query - terms. Known values are: \"none\" and \"lexicon\".""" select: Optional[list[str]] = rest_field( visibility=["read", "create", "update", "delete", "query"], format="commaDelimited" ) @@ -896,15 +657,6 @@ class SearchRequest(_Model): ) """A value that specifies whether captions should be returned as part of the search response. Known values are: \"none\" and \"extractive\".""" - query_rewrites: Optional[Union[str, "_models.QueryRewritesType"]] = rest_field( - name="queryRewrites", visibility=["read", "create", "update", "delete", "query"] - ) - """A value that specifies whether query rewrites should be generated to augment the search query. 
- Known values are: \"none\" and \"generative\".""" - semantic_fields: Optional[list[str]] = rest_field( - name="semanticFields", visibility=["read", "create", "update", "delete", "query"], format="commaDelimited" - ) - """The comma-separated list of field names used for semantic ranking.""" vector_queries: Optional[list["_models.VectorQuery"]] = rest_field( name="vectorQueries", visibility=["read", "create", "update", "delete", "query"] ) @@ -915,10 +667,6 @@ class SearchRequest(_Model): """Determines whether or not filters are applied before or after the vector search is performed. Default is 'preFilter' for new indexes. Known values are: \"postFilter\", \"preFilter\", and \"strictPostFilter\".""" - hybrid_search: Optional["_models.HybridSearch"] = rest_field( - name="hybridSearch", visibility=["read", "create", "update", "delete", "query"] - ) - """The query parameters to configure hybrid search behaviors.""" @overload def __init__( # pylint: disable=too-many-locals @@ -941,8 +689,6 @@ def __init__( # pylint: disable=too-many-locals search_text: Optional[str] = None, search_fields: Optional[list[str]] = None, search_mode: Optional[Union[str, "_models.SearchMode"]] = None, - query_language: Optional[Union[str, "_models.QueryLanguage"]] = None, - query_speller: Optional[Union[str, "_models.QuerySpellerType"]] = None, select: Optional[list[str]] = None, skip: Optional[int] = None, top: Optional[int] = None, @@ -952,11 +698,8 @@ def __init__( # pylint: disable=too-many-locals semantic_query: Optional[str] = None, answers: Optional[Union[str, "_models.QueryAnswerType"]] = None, captions: Optional[Union[str, "_models.QueryCaptionType"]] = None, - query_rewrites: Optional[Union[str, "_models.QueryRewritesType"]] = None, - semantic_fields: Optional[list[str]] = None, vector_queries: Optional[list["_models.VectorQuery"]] = None, vector_filter_mode: Optional[Union[str, "_models.VectorFilterMode"]] = None, - hybrid_search: Optional["_models.HybridSearch"] = None, ) -> 
None: ... @overload @@ -1021,114 +764,6 @@ class SearchResult(_Model): """Contains debugging information that can be used to further explore your search results.""" -class VectorThreshold(_Model): - """The threshold used for vector queries. - - You probably want to use the sub-classes and not this class directly. Known sub-classes are: - SearchScoreThreshold, VectorSimilarityThreshold - - :ivar kind: Type of threshold. Required. Known values are: "vectorSimilarity" and - "searchScore". - :vartype kind: str or ~azure.search.documents.models.VectorThresholdKind - """ - - __mapping__: dict[str, _Model] = {} - kind: str = rest_discriminator(name="kind", visibility=["read", "create", "update", "delete", "query"]) - """Type of threshold. Required. Known values are: \"vectorSimilarity\" and \"searchScore\".""" - - @overload - def __init__( - self, - *, - kind: str, - ) -> None: ... - - @overload - def __init__(self, mapping: Mapping[str, Any]) -> None: - """ - :param mapping: raw JSON to initialize the model. - :type mapping: Mapping[str, Any] - """ - - def __init__(self, *args: Any, **kwargs: Any) -> None: - super().__init__(*args, **kwargs) - - -class SearchScoreThreshold(VectorThreshold, discriminator="searchScore"): - """The results of the vector query will filter based on the '. - - :ivar value: The threshold will filter based on the '. Required. - :vartype value: float - :ivar kind: The kind of threshold used to filter vector queries. Required. The results of the - vector query will filter based on the '@search.score' value. Note this is the @search.score - returned as part of the search response. The threshold direction will be chosen for higher - @search.score. - :vartype kind: str or ~azure.search.documents.models.SEARCH_SCORE - """ - - value: float = rest_field(visibility=["read", "create", "update", "delete", "query"]) - """The threshold will filter based on the '. 
Required.""" - kind: Literal[VectorThresholdKind.SEARCH_SCORE] = rest_discriminator(name="kind", visibility=["read", "create", "update", "delete", "query"]) # type: ignore - """The kind of threshold used to filter vector queries. Required. The results of the vector query - will filter based on the '@search.score' value. Note this is the @search.score returned as part - of the search response. The threshold direction will be chosen for higher @search.score.""" - - @overload - def __init__( - self, - *, - value: float, - ) -> None: ... - - @overload - def __init__(self, mapping: Mapping[str, Any]) -> None: - """ - :param mapping: raw JSON to initialize the model. - :type mapping: Mapping[str, Any] - """ - - def __init__(self, *args: Any, **kwargs: Any) -> None: - super().__init__(*args, **kwargs) - self.kind = VectorThresholdKind.SEARCH_SCORE # type: ignore - - -class SemanticDebugInfo(_Model): - """Contains debugging information specific to semantic ranking requests. - - :ivar title_field: The title field that was sent to the semantic enrichment process, as well as - how it was used. - :vartype title_field: ~azure.search.documents.models.QueryResultDocumentSemanticField - :ivar content_fields: The content fields that were sent to the semantic enrichment process, as - well as how they were used. - :vartype content_fields: list[~azure.search.documents.models.QueryResultDocumentSemanticField] - :ivar keyword_fields: The keyword fields that were sent to the semantic enrichment process, as - well as how they were used. - :vartype keyword_fields: list[~azure.search.documents.models.QueryResultDocumentSemanticField] - :ivar reranker_input: The raw concatenated strings that were sent to the semantic enrichment - process. 
- :vartype reranker_input: ~azure.search.documents.models.QueryResultDocumentRerankerInput - """ - - title_field: Optional["_models.QueryResultDocumentSemanticField"] = rest_field( - name="titleField", visibility=["read"] - ) - """The title field that was sent to the semantic enrichment process, as well as how it was used.""" - content_fields: Optional[list["_models.QueryResultDocumentSemanticField"]] = rest_field( - name="contentFields", visibility=["read"] - ) - """The content fields that were sent to the semantic enrichment process, as well as how they were - used.""" - keyword_fields: Optional[list["_models.QueryResultDocumentSemanticField"]] = rest_field( - name="keywordFields", visibility=["read"] - ) - """The keyword fields that were sent to the semantic enrichment process, as well as how they were - used.""" - reranker_input: Optional["_models.QueryResultDocumentRerankerInput"] = rest_field( - name="rerankerInput", visibility=["read"] - ) - """The raw concatenated strings that were sent to the semantic enrichment process.""" - - class SingleVectorFieldResult(_Model): """A single vector field result. Both. @@ -1214,18 +849,6 @@ class VectorQuery(_Model): will be in the final ranking. Default is 1.0 and the value needs to be a positive number larger than zero. :vartype weight: float - :ivar threshold: The threshold used for vector queries. Note this can only be set if all - 'fields' use the same similarity metric. - :vartype threshold: ~azure.search.documents.models.VectorThreshold - :ivar filter_override: The OData filter expression to apply to this specific vector query. If - no filter expression is defined at the vector level, the expression defined in the top level - filter parameter is used instead. - :vartype filter_override: str - :ivar per_document_vector_limit: Controls how many vectors can be matched from each document in - a vector search query. 
Setting it to 1 ensures at most one vector per document is matched, - guaranteeing results come from distinct documents. Setting it to 0 (unlimited) allows multiple - relevant vectors from the same document to be matched. Default is 0. - :vartype per_document_vector_limit: int :ivar kind: Type of query. Required. Known values are: "vector", "text", "imageUrl", and "imageBinary". :vartype kind: str or ~azure.search.documents.models.VectorQueryKind @@ -1253,24 +876,6 @@ class VectorQuery(_Model): ranking lists produced by the different vector queries and/or the results retrieved through the text query. The higher the weight, the higher the documents that matched that query will be in the final ranking. Default is 1.0 and the value needs to be a positive number larger than zero.""" - threshold: Optional["_models.VectorThreshold"] = rest_field( - visibility=["read", "create", "update", "delete", "query"] - ) - """The threshold used for vector queries. Note this can only be set if all 'fields' use the same - similarity metric.""" - filter_override: Optional[str] = rest_field( - name="filterOverride", visibility=["read", "create", "update", "delete", "query"] - ) - """The OData filter expression to apply to this specific vector query. If no filter expression is - defined at the vector level, the expression defined in the top level filter parameter is used - instead.""" - per_document_vector_limit: Optional[int] = rest_field( - name="perDocumentVectorLimit", visibility=["read", "create", "update", "delete", "query"] - ) - """Controls how many vectors can be matched from each document in a vector search query. Setting - it to 1 ensures at most one vector per document is matched, guaranteeing results come from - distinct documents. Setting it to 0 (unlimited) allows multiple relevant vectors from the same - document to be matched. Default is 0.""" kind: str = rest_discriminator(name="kind", visibility=["read", "create", "update", "delete", "query"]) """Type of query. 
Required. Known values are: \"vector\", \"text\", \"imageUrl\", and \"imageBinary\".""" @@ -1285,9 +890,6 @@ def __init__( exhaustive: Optional[bool] = None, oversampling: Optional[float] = None, weight: Optional[float] = None, - threshold: Optional["_models.VectorThreshold"] = None, - filter_override: Optional[str] = None, - per_document_vector_limit: Optional[int] = None, ) -> None: ... @overload @@ -1326,18 +928,6 @@ class VectorizableImageBinaryQuery(VectorQuery, discriminator="imageBinary"): will be in the final ranking. Default is 1.0 and the value needs to be a positive number larger than zero. :vartype weight: float - :ivar threshold: The threshold used for vector queries. Note this can only be set if all - 'fields' use the same similarity metric. - :vartype threshold: ~azure.search.documents.models.VectorThreshold - :ivar filter_override: The OData filter expression to apply to this specific vector query. If - no filter expression is defined at the vector level, the expression defined in the top level - filter parameter is used instead. - :vartype filter_override: str - :ivar per_document_vector_limit: Controls how many vectors can be matched from each document in - a vector search query. Setting it to 1 ensures at most one vector per document is matched, - guaranteeing results come from distinct documents. Setting it to 0 (unlimited) allows multiple - relevant vectors from the same document to be matched. Default is 0. - :vartype per_document_vector_limit: int :ivar base64_image: The base 64 encoded binary of an image to be vectorized to perform a vector search query. :vartype base64_image: str @@ -1363,9 +953,6 @@ def __init__( exhaustive: Optional[bool] = None, oversampling: Optional[float] = None, weight: Optional[float] = None, - threshold: Optional["_models.VectorThreshold"] = None, - filter_override: Optional[str] = None, - per_document_vector_limit: Optional[int] = None, base64_image: Optional[str] = None, ) -> None: ... 
@@ -1406,18 +993,6 @@ class VectorizableImageUrlQuery(VectorQuery, discriminator="imageUrl"): will be in the final ranking. Default is 1.0 and the value needs to be a positive number larger than zero. :vartype weight: float - :ivar threshold: The threshold used for vector queries. Note this can only be set if all - 'fields' use the same similarity metric. - :vartype threshold: ~azure.search.documents.models.VectorThreshold - :ivar filter_override: The OData filter expression to apply to this specific vector query. If - no filter expression is defined at the vector level, the expression defined in the top level - filter parameter is used instead. - :vartype filter_override: str - :ivar per_document_vector_limit: Controls how many vectors can be matched from each document in - a vector search query. Setting it to 1 ensures at most one vector per document is matched, - guaranteeing results come from distinct documents. Setting it to 0 (unlimited) allows multiple - relevant vectors from the same document to be matched. Default is 0. - :vartype per_document_vector_limit: int :ivar url: The URL of an image to be vectorized to perform a vector search query. :vartype url: str :ivar kind: The kind of vector query being performed. Required. Vector query where an url that @@ -1440,9 +1015,6 @@ def __init__( exhaustive: Optional[bool] = None, oversampling: Optional[float] = None, weight: Optional[float] = None, - threshold: Optional["_models.VectorThreshold"] = None, - filter_override: Optional[str] = None, - per_document_vector_limit: Optional[int] = None, url: Optional[str] = None, ) -> None: ... @@ -1483,23 +1055,8 @@ class VectorizableTextQuery(VectorQuery, discriminator="text"): will be in the final ranking. Default is 1.0 and the value needs to be a positive number larger than zero. :vartype weight: float - :ivar threshold: The threshold used for vector queries. Note this can only be set if all - 'fields' use the same similarity metric. 
- :vartype threshold: ~azure.search.documents.models.VectorThreshold - :ivar filter_override: The OData filter expression to apply to this specific vector query. If - no filter expression is defined at the vector level, the expression defined in the top level - filter parameter is used instead. - :vartype filter_override: str - :ivar per_document_vector_limit: Controls how many vectors can be matched from each document in - a vector search query. Setting it to 1 ensures at most one vector per document is matched, - guaranteeing results come from distinct documents. Setting it to 0 (unlimited) allows multiple - relevant vectors from the same document to be matched. Default is 0. - :vartype per_document_vector_limit: int :ivar text: The text to be vectorized to perform a vector search query. Required. :vartype text: str - :ivar query_rewrites: Can be configured to let a generative model rewrite the query before - sending it to be vectorized. Known values are: "none" and "generative". - :vartype query_rewrites: str or ~azure.search.documents.models.QueryRewritesType :ivar kind: The kind of vector query being performed. Required. Vector query where a text value that needs to be vectorized is provided. :vartype kind: str or ~azure.search.documents.models.TEXT @@ -1507,11 +1064,6 @@ class VectorizableTextQuery(VectorQuery, discriminator="text"): text: str = rest_field(visibility=["read", "create", "update", "delete", "query"]) """The text to be vectorized to perform a vector search query. Required.""" - query_rewrites: Optional[Union[str, "_models.QueryRewritesType"]] = rest_field( - name="queryRewrites", visibility=["read", "create", "update", "delete", "query"] - ) - """Can be configured to let a generative model rewrite the query before sending it to be - vectorized. 
Known values are: \"none\" and \"generative\".""" kind: Literal[VectorQueryKind.TEXT] = rest_discriminator(name="kind", visibility=["read", "create", "update", "delete", "query"]) # type: ignore """The kind of vector query being performed. Required. Vector query where a text value that needs to be vectorized is provided.""" @@ -1526,10 +1078,6 @@ def __init__( exhaustive: Optional[bool] = None, oversampling: Optional[float] = None, weight: Optional[float] = None, - threshold: Optional["_models.VectorThreshold"] = None, - filter_override: Optional[str] = None, - per_document_vector_limit: Optional[int] = None, - query_rewrites: Optional[Union[str, "_models.QueryRewritesType"]] = None, ) -> None: ... @overload @@ -1568,18 +1116,6 @@ class VectorizedQuery(VectorQuery, discriminator="vector"): will be in the final ranking. Default is 1.0 and the value needs to be a positive number larger than zero. :vartype weight: float - :ivar threshold: The threshold used for vector queries. Note this can only be set if all - 'fields' use the same similarity metric. - :vartype threshold: ~azure.search.documents.models.VectorThreshold - :ivar filter_override: The OData filter expression to apply to this specific vector query. If - no filter expression is defined at the vector level, the expression defined in the top level - filter parameter is used instead. - :vartype filter_override: str - :ivar per_document_vector_limit: Controls how many vectors can be matched from each document in - a vector search query. Setting it to 1 ensures at most one vector per document is matched, - guaranteeing results come from distinct documents. Setting it to 0 (unlimited) allows multiple - relevant vectors from the same document to be matched. Default is 0. - :vartype per_document_vector_limit: int :ivar vector: The vector representation of a search query. Required. :vartype vector: list[float] :ivar kind: The kind of vector query being performed. Required. 
Vector query where a raw vector @@ -1603,9 +1139,6 @@ def __init__( exhaustive: Optional[bool] = None, oversampling: Optional[float] = None, weight: Optional[float] = None, - threshold: Optional["_models.VectorThreshold"] = None, - filter_override: Optional[str] = None, - per_document_vector_limit: Optional[int] = None, ) -> None: ... @overload @@ -1631,50 +1164,3 @@ class VectorsDebugInfo(_Model): subscores: Optional["_models.QueryResultDocumentSubscores"] = rest_field(visibility=["read"]) """The breakdown of subscores of the document prior to the chosen result set fusion/combination method such as RRF.""" - - -class VectorSimilarityThreshold(VectorThreshold, discriminator="vectorSimilarity"): - """The results of the vector query will be filtered based on the vector similarity metric. Note - this is the canonical definition of similarity metric, not the 'distance' version. The - threshold direction (larger or smaller) will be chosen automatically according to the metric - used by the field. - - :ivar value: The threshold will filter based on the similarity metric value. Note this is the - canonical definition of similarity metric, not the 'distance' version. The threshold direction - (larger or smaller) will be chosen automatically according to the metric used by the field. - Required. - :vartype value: float - :ivar kind: The kind of threshold used to filter vector queries. Required. The results of the - vector query will be filtered based on the vector similarity metric. Note this is the canonical - definition of similarity metric, not the 'distance' version. The threshold direction (larger or - smaller) will be chosen automatically according to the metric used by the field. - :vartype kind: str or ~azure.search.documents.models.VECTOR_SIMILARITY - """ - - value: float = rest_field(visibility=["read", "create", "update", "delete", "query"]) - """The threshold will filter based on the similarity metric value. 
Note this is the canonical - definition of similarity metric, not the 'distance' version. The threshold direction (larger or - smaller) will be chosen automatically according to the metric used by the field. Required.""" - kind: Literal[VectorThresholdKind.VECTOR_SIMILARITY] = rest_discriminator(name="kind", visibility=["read", "create", "update", "delete", "query"]) # type: ignore - """The kind of threshold used to filter vector queries. Required. The results of the vector query - will be filtered based on the vector similarity metric. Note this is the canonical definition - of similarity metric, not the 'distance' version. The threshold direction (larger or smaller) - will be chosen automatically according to the metric used by the field.""" - - @overload - def __init__( - self, - *, - value: float, - ) -> None: ... - - @overload - def __init__(self, mapping: Mapping[str, Any]) -> None: - """ - :param mapping: raw JSON to initialize the model. - :type mapping: Mapping[str, Any] - """ - - def __init__(self, *args: Any, **kwargs: Any) -> None: - super().__init__(*args, **kwargs) - self.kind = VectorThresholdKind.VECTOR_SIMILARITY # type: ignore diff --git a/sdk/search/azure-search-documents/azure/search/documents/models/_patch.py b/sdk/search/azure-search-documents/azure/search/documents/models/_patch.py index 1a047214579e..5c41cf547f37 100644 --- a/sdk/search/azure-search-documents/azure/search/documents/models/_patch.py +++ b/sdk/search/azure-search-documents/azure/search/documents/models/_patch.py @@ -13,7 +13,7 @@ from ._models import IndexDocumentsBatch as IndexDocumentsBatchGenerated from ._models import IndexAction -from ._enums import IndexActionType, ScoringStatistics +from ._enums import AutocompleteMode, IndexActionType, ScoringStatistics # Backward-compatible alias: IS was renamed to IS_ENUM to avoid conflict with Python keyword ScoringStatistics.Global = ScoringStatistics.GLOBAL_ENUM # type: ignore[attr-defined] @@ -206,6 +206,7 @@ def 
_extend_batch(self, documents: List[Dict], action_type: str) -> List[IndexAc IndexDocumentsBatch.__module__ = "azure.search.documents" __all__: list[str] = [ + "AutocompleteMode", "IndexDocumentsBatch", "ScoringStatistics", ] # Add all objects you want publicly available to users at this package level diff --git a/sdk/search/azure-search-documents/pyproject.toml b/sdk/search/azure-search-documents/pyproject.toml index 04403f6d9741..16ddad630488 100644 --- a/sdk/search/azure-search-documents/pyproject.toml +++ b/sdk/search/azure-search-documents/pyproject.toml @@ -17,7 +17,7 @@ authors = [ description = "Microsoft Corporation Azure Search Documents Client Library for Python" license = "MIT" classifiers = [ - "Development Status :: 4 - Beta", + "Development Status :: 5 - Production/Stable", "Programming Language :: Python", "Programming Language :: Python :: 3 :: Only", "Programming Language :: Python :: 3", diff --git a/sdk/search/azure-search-documents/tests/conftest.py b/sdk/search/azure-search-documents/tests/conftest.py index a761582cd729..bedf8b84d11c 100644 --- a/sdk/search/azure-search-documents/tests/conftest.py +++ b/sdk/search/azure-search-documents/tests/conftest.py @@ -11,6 +11,7 @@ add_general_regex_sanitizer, ) + @pytest.fixture(scope="session", autouse=True) def add_sanitizers(test_proxy): add_remove_header_sanitizer(headers="api-key") diff --git a/sdk/search/azure-search-documents/tests/search_service_preparer.py b/sdk/search/azure-search-documents/tests/search_service_preparer.py index 0084a88e480f..5050e0dc8b8b 100644 --- a/sdk/search/azure-search-documents/tests/search_service_preparer.py +++ b/sdk/search/azure-search-documents/tests/search_service_preparer.py @@ -71,8 +71,11 @@ def _clean_up_indexes(endpoint, cred): client.delete_synonym_map(map.name) # wipe out any existing aliases - for alias in client.list_aliases(): - client.delete_alias(alias) + try: + for alias in client.list_aliases(): + client.delete_alias(alias) + except 
HttpResponseError: + pass # wipe any existing indexes for index in client.list_indexes(): diff --git a/sdk/search/azure-search-documents/tests/test_knowledge_base_configuration_live.py b/sdk/search/azure-search-documents/tests/test_knowledge_base_configuration_live.py index 6f521e8e3ef4..730067a919a2 100644 --- a/sdk/search/azure-search-documents/tests/test_knowledge_base_configuration_live.py +++ b/sdk/search/azure-search-documents/tests/test_knowledge_base_configuration_live.py @@ -5,8 +5,6 @@ from __future__ import annotations -import pytest - from azure.core import MatchConditions from azure.core.exceptions import HttpResponseError from devtools_testutils import AzureRecordedTestCase, recorded_by_proxy, get_credential @@ -26,11 +24,6 @@ SemanticSearch, ) -from azure.search.documents.knowledgebases.models import ( - KnowledgeRetrievalMediumReasoningEffort, - KnowledgeRetrievalMinimalReasoningEffort, -) - from search_service_preparer import SearchEnvVarPreparer, search_decorator @@ -108,8 +101,6 @@ def _create_context(self, endpoint: str) -> "_TestContext": name=base_name, description="configurable knowledge base", knowledge_sources=[KnowledgeSourceReference(name=source_name)], - retrieval_reasoning_effort=KnowledgeRetrievalMinimalReasoningEffort(), - output_mode="extractiveData", ) try: @@ -158,40 +149,28 @@ def test_knowledge_base_configuration_round_trip(self, endpoint: str) -> None: ctx = self._create_context(endpoint) try: created = ctx.created_base - assert isinstance( - created.retrieval_reasoning_effort, - KnowledgeRetrievalMinimalReasoningEffort, - ) - assert created.output_mode == "extractiveData" - assert created.retrieval_instructions is None - assert created.answer_instructions is None + assert created.name == ctx.base_name + assert created.description == "configurable knowledge base" + assert len(created.knowledge_sources) == 1 + assert created.knowledge_sources[0].name == ctx.source_name + assert created.e_tag is not None update_model = 
KnowledgeBase( name=ctx.base_name, description="config updated", knowledge_sources=[KnowledgeSourceReference(name=ctx.source_name)], - retrieval_reasoning_effort=KnowledgeRetrievalMediumReasoningEffort(), - output_mode="answerSynthesis", - retrieval_instructions="summarize with details", - answer_instructions="include citations and summaries", ) update_model.e_tag = created.e_tag - with pytest.raises(HttpResponseError) as ex: - ctx.index_client.create_or_update_knowledge_base( - update_model, - match_condition=MatchConditions.IfNotModified, - ) - - assert "Retrieval instructions cannot be specified" in str(ex.value) + updated = ctx.index_client.create_or_update_knowledge_base( + update_model, + match_condition=MatchConditions.IfNotModified, + ) + assert updated.description == "config updated" + assert updated.e_tag != created.e_tag fetched = ctx.index_client.get_knowledge_base(ctx.base_name) - assert isinstance( - fetched.retrieval_reasoning_effort, - KnowledgeRetrievalMinimalReasoningEffort, - ) - assert fetched.output_mode == "extractiveData" - assert fetched.retrieval_instructions is None - assert fetched.answer_instructions is None + assert fetched.description == "config updated" + assert len(fetched.knowledge_sources) == 1 finally: self._cleanup(ctx) diff --git a/sdk/search/azure-search-documents/tests/test_knowledge_base_configuration_live_async.py b/sdk/search/azure-search-documents/tests/test_knowledge_base_configuration_live_async.py index 5f77ccf60cc9..6617610dfbeb 100644 --- a/sdk/search/azure-search-documents/tests/test_knowledge_base_configuration_live_async.py +++ b/sdk/search/azure-search-documents/tests/test_knowledge_base_configuration_live_async.py @@ -5,8 +5,6 @@ from __future__ import annotations -import pytest - from azure.core import MatchConditions from azure.core.exceptions import HttpResponseError from devtools_testutils import AzureRecordedTestCase, get_credential @@ -26,10 +24,6 @@ SemanticPrioritizedFields, SemanticSearch, ) -from 
azure.search.documents.knowledgebases.models import ( - KnowledgeRetrievalMediumReasoningEffort, - KnowledgeRetrievalMinimalReasoningEffort, -) from search_service_preparer import SearchEnvVarPreparer, search_decorator @@ -108,8 +102,6 @@ async def _create_context(self, endpoint: str) -> "_AsyncTestContext": name=base_name, description="configurable knowledge base", knowledge_sources=[KnowledgeSourceReference(name=source_name)], - retrieval_reasoning_effort=KnowledgeRetrievalMinimalReasoningEffort(), - output_mode="extractiveData", ) try: @@ -157,40 +149,28 @@ async def test_knowledge_base_configuration_round_trip(self, endpoint: str) -> N ctx = await self._create_context(endpoint) try: created = ctx.created_base - assert isinstance( - created.retrieval_reasoning_effort, - KnowledgeRetrievalMinimalReasoningEffort, - ) - assert created.output_mode == "extractiveData" - assert created.retrieval_instructions is None - assert created.answer_instructions is None + assert created.name == ctx.base_name + assert created.description == "configurable knowledge base" + assert len(created.knowledge_sources) == 1 + assert created.knowledge_sources[0].name == ctx.source_name + assert created.e_tag is not None update_model = KnowledgeBase( name=ctx.base_name, description="config updated", knowledge_sources=[KnowledgeSourceReference(name=ctx.source_name)], - retrieval_reasoning_effort=KnowledgeRetrievalMediumReasoningEffort(), - output_mode="answerSynthesis", - retrieval_instructions="summarize with details", - answer_instructions="include citations and summaries", ) update_model.e_tag = created.e_tag - with pytest.raises(HttpResponseError) as ex: - await ctx.index_client.create_or_update_knowledge_base( - update_model, - match_condition=MatchConditions.IfNotModified, - ) - - assert "Retrieval instructions cannot be specified" in str(ex.value) + updated = await ctx.index_client.create_or_update_knowledge_base( + update_model, + match_condition=MatchConditions.IfNotModified, + ) + 
assert updated.description == "config updated" + assert updated.e_tag != created.e_tag fetched = await ctx.index_client.get_knowledge_base(ctx.base_name) - assert isinstance( - fetched.retrieval_reasoning_effort, - KnowledgeRetrievalMinimalReasoningEffort, - ) - assert fetched.output_mode == "extractiveData" - assert fetched.retrieval_instructions is None - assert fetched.answer_instructions is None + assert fetched.description == "config updated" + assert len(fetched.knowledge_sources) == 1 finally: await self._cleanup(ctx) diff --git a/sdk/search/azure-search-documents/tests/test_knowledge_base_live.py b/sdk/search/azure-search-documents/tests/test_knowledge_base_live.py index 517b3b6ff031..7473fbb8410e 100644 --- a/sdk/search/azure-search-documents/tests/test_knowledge_base_live.py +++ b/sdk/search/azure-search-documents/tests/test_knowledge_base_live.py @@ -20,12 +20,10 @@ SearchIndex, SearchIndexKnowledgeSource, SearchIndexKnowledgeSourceParameters, - SearchServiceStatistics, SemanticConfiguration, SemanticField, SemanticPrioritizedFields, SemanticSearch, - ServiceIndexersRuntime, ) from search_service_preparer import SearchEnvVarPreparer, search_decorator @@ -222,33 +220,3 @@ def test_knowledge_source_status_tracking(self, endpoint: str) -> None: assert last.last_synchronization_state.items_updates_processed >= 0 finally: self._cleanup(ctx) - - @SearchEnvVarPreparer() - @search_decorator(schema=None, index_batch=None) - @recorded_by_proxy - def test_service_indexer_runtime_statistics(self, endpoint: str) -> None: - ctx = self._create_context(endpoint) - try: - snapshots = self._poll_status_snapshots(ctx) - assert snapshots, "Expected at least one status snapshot" - - service_stats = ctx.index_client.get_service_statistics() # pylint:disable=protected-access - assert isinstance(service_stats, SearchServiceStatistics) - - runtime = service_stats.indexers_runtime - assert isinstance(runtime, ServiceIndexersRuntime) - assert runtime.used_seconds >= -1 - assert 
runtime.beginning_time <= runtime.ending_time - if runtime.remaining_seconds is not None: - assert runtime.remaining_seconds >= -1 - - counters = service_stats.counters - assert counters.indexer_counter is not None - assert counters.indexer_counter.usage >= 0 - assert counters.indexer_counter.quota >= counters.indexer_counter.usage - - limits = service_stats.limits - if limits.max_cumulative_indexer_runtime_seconds is not None: - assert limits.max_cumulative_indexer_runtime_seconds > 0 - finally: - self._cleanup(ctx) diff --git a/sdk/search/azure-search-documents/tests/test_knowledge_base_live_async.py b/sdk/search/azure-search-documents/tests/test_knowledge_base_live_async.py index 541543b882dd..840267ac81e9 100644 --- a/sdk/search/azure-search-documents/tests/test_knowledge_base_live_async.py +++ b/sdk/search/azure-search-documents/tests/test_knowledge_base_live_async.py @@ -21,12 +21,10 @@ SearchIndex, SearchIndexKnowledgeSource, SearchIndexKnowledgeSourceParameters, - SearchServiceStatistics, SemanticConfiguration, SemanticField, SemanticPrioritizedFields, SemanticSearch, - ServiceIndexersRuntime, ) from search_service_preparer import SearchEnvVarPreparer, search_decorator @@ -223,33 +221,3 @@ async def test_knowledge_source_status_tracking(self, endpoint: str) -> None: assert last.last_synchronization_state.items_updates_processed >= 0 finally: await self._cleanup(ctx) - - @SearchEnvVarPreparer() - @search_decorator(schema=None, index_batch=None) - @recorded_by_proxy_async - async def test_service_indexer_runtime_statistics(self, endpoint: str) -> None: - ctx = await self._create_context(endpoint) - try: - snapshots = await self._poll_status_snapshots(ctx) - assert snapshots, "Expected at least one status snapshot" - - service_stats = await ctx.index_client.get_service_statistics() # pylint:disable=protected-access - assert isinstance(service_stats, SearchServiceStatistics) - - runtime = service_stats.indexers_runtime - assert isinstance(runtime, 
ServiceIndexersRuntime) - assert runtime.used_seconds >= -1 - assert runtime.beginning_time <= runtime.ending_time - if runtime.remaining_seconds is not None: - assert runtime.remaining_seconds >= -1 - - counters = service_stats.counters - assert counters.indexer_counter is not None - assert counters.indexer_counter.usage >= 0 - assert counters.indexer_counter.quota >= counters.indexer_counter.usage - - limits = service_stats.limits - if limits.max_cumulative_indexer_runtime_seconds is not None: - assert limits.max_cumulative_indexer_runtime_seconds > 0 - finally: - await self._cleanup(ctx) diff --git a/sdk/search/azure-search-documents/tests/test_knowledge_source_remote_sharepoint_live.py b/sdk/search/azure-search-documents/tests/test_knowledge_source_remote_sharepoint_live.py deleted file mode 100644 index a23794f4c52e..000000000000 --- a/sdk/search/azure-search-documents/tests/test_knowledge_source_remote_sharepoint_live.py +++ /dev/null @@ -1,129 +0,0 @@ -# ------------------------------------ -# Copyright (c) Microsoft Corporation. -# Licensed under the MIT License. 
-# ------------------------------------ - -from __future__ import annotations - -from azure.core import MatchConditions -from azure.core.exceptions import HttpResponseError -from devtools_testutils import AzureRecordedTestCase, recorded_by_proxy, get_credential - -from azure.search.documents.indexes import SearchIndexClient -from azure.search.documents.indexes.models import ( - RemoteSharePointKnowledgeSource, - RemoteSharePointKnowledgeSourceParameters, -) - -from search_service_preparer import SearchEnvVarPreparer, search_decorator - - -class _TestContext: - def __init__( - self, - index_client: SearchIndexClient, - source_name: str, - created_revision: RemoteSharePointKnowledgeSource, - ) -> None: - self.index_client = index_client - self.source_name = source_name - self.created_revision = created_revision - - -class TestRemoteSharePointKnowledgeSourceLive(AzureRecordedTestCase): - def _create_context(self, endpoint: str) -> "_TestContext": - credential = get_credential() - index_client = SearchIndexClient(endpoint, credential, retry_backoff_factor=60) - - source_name = self.get_resource_name("spsource") - create_model = RemoteSharePointKnowledgeSource( - name=source_name, - description="initial sharepoint source", - remote_share_point_parameters=RemoteSharePointKnowledgeSourceParameters( - filter_expression="Title:Test", - resource_metadata=["Title", "Path"], - ), - ) - created = index_client.create_knowledge_source(create_model) - return _TestContext(index_client, source_name, created) - - def _cleanup(self, ctx: "_TestContext") -> None: - try: - try: - ctx.index_client.delete_knowledge_source( - ctx.created_revision, - match_condition=MatchConditions.IfNotModified, - ) - except HttpResponseError: - pass - finally: - ctx.index_client.close() - - @SearchEnvVarPreparer() - @search_decorator(schema=None, index_batch=None) - @recorded_by_proxy - def test_remote_sharepoint_knowledge_source_create(self, endpoint: str) -> None: - ctx = self._create_context(endpoint) 
- try: - assert ctx.created_revision.name == ctx.source_name - assert ctx.created_revision.kind == "remoteSharePoint" - params = ctx.created_revision.remote_share_point_parameters - assert params is not None - assert params.filter_expression == "Title:Test" - assert params.resource_metadata is not None - assert {"Title", "Path"}.issubset(set(params.resource_metadata)) - finally: - self._cleanup(ctx) - - @SearchEnvVarPreparer() - @search_decorator(schema=None, index_batch=None) - @recorded_by_proxy - def test_remote_sharepoint_knowledge_source_update(self, endpoint: str) -> None: - ctx = self._create_context(endpoint) - try: - update_model = RemoteSharePointKnowledgeSource( - name=ctx.source_name, - description="updated description", - remote_share_point_parameters=ctx.created_revision.remote_share_point_parameters, - ) - update_model.e_tag = ctx.created_revision.e_tag - - revised = ctx.index_client.create_or_update_knowledge_source( - update_model, - match_condition=MatchConditions.IfNotModified, - ) - ctx.created_revision = revised - assert revised.description == "updated description" - finally: - self._cleanup(ctx) - - @SearchEnvVarPreparer() - @search_decorator(schema=None, index_batch=None) - @recorded_by_proxy - def test_remote_sharepoint_knowledge_source_read(self, endpoint: str) -> None: - ctx = self._create_context(endpoint) - try: - fetched = ctx.index_client.get_knowledge_source(ctx.source_name) - status = ctx.index_client.get_knowledge_source_status(ctx.source_name) - listed = list(ctx.index_client.list_knowledge_sources()) - - assert fetched.name == ctx.source_name - assert status.synchronization_status in {"creating", "active", "deleting"} - assert any(item.name == ctx.source_name for item in listed) - finally: - self._cleanup(ctx) - - @SearchEnvVarPreparer() - @search_decorator(schema=None, index_batch=None) - @recorded_by_proxy - def test_remote_sharepoint_knowledge_source_delete(self, endpoint: str) -> None: - ctx = self._create_context(endpoint) - 
try: - ctx.index_client.delete_knowledge_source( - ctx.created_revision, - match_condition=MatchConditions.IfNotModified, - ) - remaining = list(ctx.index_client.list_knowledge_sources()) - assert all(item.name != ctx.source_name for item in remaining) - finally: - ctx.index_client.close() diff --git a/sdk/search/azure-search-documents/tests/test_knowledge_source_remote_sharepoint_live_async.py b/sdk/search/azure-search-documents/tests/test_knowledge_source_remote_sharepoint_live_async.py deleted file mode 100644 index 7b000bbf63a5..000000000000 --- a/sdk/search/azure-search-documents/tests/test_knowledge_source_remote_sharepoint_live_async.py +++ /dev/null @@ -1,130 +0,0 @@ -# ------------------------------------ -# Copyright (c) Microsoft Corporation. -# Licensed under the MIT License. -# ------------------------------------ - -from __future__ import annotations - -from azure.core import MatchConditions -from azure.core.exceptions import HttpResponseError -from devtools_testutils import AzureRecordedTestCase, get_credential -from devtools_testutils.aio import recorded_by_proxy_async - -from azure.search.documents.indexes.aio import SearchIndexClient -from azure.search.documents.indexes.models import ( - RemoteSharePointKnowledgeSource, - RemoteSharePointKnowledgeSourceParameters, -) - -from search_service_preparer import SearchEnvVarPreparer, search_decorator - - -class _AsyncTestContext: - def __init__( - self, - index_client: SearchIndexClient, - source_name: str, - created_revision: RemoteSharePointKnowledgeSource, - ) -> None: - self.index_client = index_client - self.source_name = source_name - self.created_revision = created_revision - - -class TestRemoteSharePointKnowledgeSourceLiveAsync(AzureRecordedTestCase): - async def _create_context(self, endpoint: str) -> "_AsyncTestContext": - credential = get_credential(is_async=True) - index_client = SearchIndexClient(endpoint, credential, retry_backoff_factor=60) - - source_name = 
self.get_resource_name("spsource") - create_model = RemoteSharePointKnowledgeSource( - name=source_name, - description="initial sharepoint source", - remote_share_point_parameters=RemoteSharePointKnowledgeSourceParameters( - filter_expression="Title:Test", - resource_metadata=["Title", "Path"], - ), - ) - created = await index_client.create_knowledge_source(create_model) - return _AsyncTestContext(index_client, source_name, created) - - async def _cleanup(self, ctx: "_AsyncTestContext") -> None: - try: - try: - await ctx.index_client.delete_knowledge_source( - ctx.created_revision, - match_condition=MatchConditions.IfNotModified, - ) - except HttpResponseError: - pass - finally: - await ctx.index_client.close() - - @SearchEnvVarPreparer() - @search_decorator(schema=None, index_batch=None) - @recorded_by_proxy_async - async def test_remote_sharepoint_knowledge_source_create(self, endpoint: str) -> None: - ctx = await self._create_context(endpoint) - try: - assert ctx.created_revision.name == ctx.source_name - assert ctx.created_revision.kind == "remoteSharePoint" - params = ctx.created_revision.remote_share_point_parameters - assert params is not None - assert params.filter_expression == "Title:Test" - assert params.resource_metadata is not None - assert {"Title", "Path"}.issubset(set(params.resource_metadata)) - finally: - await self._cleanup(ctx) - - @SearchEnvVarPreparer() - @search_decorator(schema=None, index_batch=None) - @recorded_by_proxy_async - async def test_remote_sharepoint_knowledge_source_update(self, endpoint: str) -> None: - ctx = await self._create_context(endpoint) - try: - update_model = RemoteSharePointKnowledgeSource( - name=ctx.source_name, - description="updated description", - remote_share_point_parameters=ctx.created_revision.remote_share_point_parameters, - ) - update_model.e_tag = ctx.created_revision.e_tag - - revised = await ctx.index_client.create_or_update_knowledge_source( - update_model, - 
match_condition=MatchConditions.IfNotModified, - ) - ctx.created_revision = revised - assert revised.description == "updated description" - finally: - await self._cleanup(ctx) - - @SearchEnvVarPreparer() - @search_decorator(schema=None, index_batch=None) - @recorded_by_proxy_async - async def test_remote_sharepoint_knowledge_source_read(self, endpoint: str) -> None: - ctx = await self._create_context(endpoint) - try: - fetched = await ctx.index_client.get_knowledge_source(ctx.source_name) - status = await ctx.index_client.get_knowledge_source_status(ctx.source_name) - listed = [item async for item in ctx.index_client.list_knowledge_sources()] - - assert fetched.name == ctx.source_name - assert status.synchronization_status in {"creating", "active", "deleting"} - assert any(item.name == ctx.source_name for item in listed) - finally: - await self._cleanup(ctx) - - @SearchEnvVarPreparer() - @search_decorator(schema=None, index_batch=None) - @recorded_by_proxy_async - async def test_remote_sharepoint_knowledge_source_delete(self, endpoint: str) -> None: - ctx = await self._create_context(endpoint) - try: - await ctx.index_client.delete_knowledge_source( - ctx.created_revision, - match_condition=MatchConditions.IfNotModified, - ) - remaining = [item async for item in ctx.index_client.list_knowledge_sources()] - assert all(item.name != ctx.source_name for item in remaining) - finally: - await ctx.index_client.close() diff --git a/sdk/search/azure-search-documents/tests/test_search_client.py b/sdk/search/azure-search-documents/tests/test_search_client.py index dfbac7308cde..eebac852258d 100644 --- a/sdk/search/azure-search-documents/tests/test_search_client.py +++ b/sdk/search/azure-search-documents/tests/test_search_client.py @@ -146,10 +146,6 @@ def test_get_facets_with_aggregations(self, mock_search_post): facet_bucket = FacetResult() facet_bucket.count = 4 - facet_bucket.avg = 120.5 - facet_bucket.min = 75.0 - facet_bucket.max = 240.0 - facet_bucket.cardinality = 3 
search_result.facets = {"baseRate": [facet_bucket]} mock_search_post.return_value = search_result @@ -162,10 +158,6 @@ def test_get_facets_with_aggregations(self, mock_search_post): assert len(facets["baseRate"]) == 1 bucket = facets["baseRate"][0] assert bucket["count"] == 4 - assert bucket["avg"] == 120.5 - assert bucket["min"] == 75.0 - assert bucket["max"] == 240.0 - assert bucket["cardinality"] == 3 @mock.patch("azure.search.documents._operations._operations._SearchClientOperationsMixin.get_document") def test_get_document_v2020_06_30(self, mock_get): diff --git a/sdk/search/azure-search-documents/tests/test_search_client_async.py b/sdk/search/azure-search-documents/tests/test_search_client_async.py index 5ee4c0503ce3..826802f59386 100644 --- a/sdk/search/azure-search-documents/tests/test_search_client_async.py +++ b/sdk/search/azure-search-documents/tests/test_search_client_async.py @@ -60,10 +60,6 @@ async def test_get_facets_with_aggregations(self, mock_search_post): facet_bucket = FacetResult() facet_bucket.count = 4 - facet_bucket.avg = 120.5 - facet_bucket.min = 75.0 - facet_bucket.max = 240.0 - facet_bucket.cardinality = 3 search_result.facets = {"baseRate": [facet_bucket]} mock_search_post.return_value = search_result @@ -76,7 +72,3 @@ async def test_get_facets_with_aggregations(self, mock_search_post): assert len(facets["baseRate"]) == 1 bucket = facets["baseRate"][0] assert bucket["count"] == 4 - assert bucket["avg"] == 120.5 - assert bucket["min"] == 75.0 - assert bucket["max"] == 240.0 - assert bucket["cardinality"] == 3 diff --git a/sdk/search/azure-search-documents/tests/test_search_client_search_live.py b/sdk/search/azure-search-documents/tests/test_search_client_search_live.py index 36f15455cb4f..eb8a2cb17990 100644 --- a/sdk/search/azure-search-documents/tests/test_search_client_search_live.py +++ b/sdk/search/azure-search-documents/tests/test_search_client_search_live.py @@ -4,8 +4,6 @@ # license information. 
# -------------------------------------------------------------------------- -import math - from azure.search.documents import SearchClient from devtools_testutils import AzureRecordedTestCase, recorded_by_proxy, get_credential @@ -26,7 +24,6 @@ def test_search_client(self, endpoint, index_name): self._test_get_search_coverage(client) self._test_get_search_facets_none(client) self._test_get_search_facets_result(client) - self._test_get_search_facet_metrics(client) self._test_autocomplete(client) self._test_suggest(client) @@ -125,43 +122,6 @@ def _test_get_search_facets_result(self, client): ] } - def _test_get_search_facet_metrics(self, client): - facets = [ - "rooms/baseRate,metric:sum", - "rooms/baseRate,metric:avg", - "rooms/baseRate,metric:min", - "rooms/baseRate,metric:max,default:0", - "rooms/sleepsCount,metric:cardinality,precisionThreshold:10", - ] - results = client.search(search_text="*", facets=facets) - - facet_payload = results.get_facets() - assert facet_payload is not None - - base_rate_metrics = facet_payload.get("rooms/baseRate", []) - assert len(base_rate_metrics) == 4 - - observed_metrics = {} - for bucket in base_rate_metrics: - for metric in ("sum", "avg", "min", "max"): - value = bucket.get(metric) - if value is not None: - observed_metrics[metric] = value - - expected_metrics = { - "sum": 27.91, - "avg": 6.9775, - "min": 2.44, - "max": 9.69, - } - for metric, expected in expected_metrics.items(): - assert metric in observed_metrics - assert math.isclose(observed_metrics[metric], expected, rel_tol=0.0, abs_tol=0.001) - - sleeps_metrics = facet_payload.get("rooms/sleepsCount", []) - assert len(sleeps_metrics) == 1 - assert sleeps_metrics[0].get("cardinality") == 1 - def _test_autocomplete(self, client): results = client.autocomplete(search_text="mot", suggester_name="sg") assert any(d.text == "motel" for d in results) diff --git a/sdk/search/azure-search-documents/tests/test_search_client_search_live_async.py 
b/sdk/search/azure-search-documents/tests/test_search_client_search_live_async.py index a3eec2f3ff63..d6e4c7e61502 100644 --- a/sdk/search/azure-search-documents/tests/test_search_client_search_live_async.py +++ b/sdk/search/azure-search-documents/tests/test_search_client_search_live_async.py @@ -28,7 +28,6 @@ async def test_search_client(self, endpoint, index_name): await self._test_get_search_coverage(client) await self._test_get_search_facets_none(client) await self._test_get_search_facets_result(client) - await self._test_get_search_facet_metrics(client) await self._test_autocomplete(client) await self._test_suggest(client) @@ -135,42 +134,6 @@ async def _test_get_search_facets_result(self, client): ] } - async def _test_get_search_facet_metrics(self, client): - facets = [ - "rooms/baseRate,metric:sum", - "rooms/baseRate,metric:avg", - "rooms/baseRate,metric:min", - "rooms/baseRate,metric:max,default:0", - "rooms/sleepsCount,metric:cardinality,precisionThreshold:10", - ] - results = await client.search(search_text="*", facets=facets) - facet_payload = await results.get_facets() - assert facet_payload is not None - - base_rate_metrics = facet_payload.get("rooms/baseRate", []) - assert len(base_rate_metrics) == 4 - - observed_metrics = {} - for bucket in base_rate_metrics: - for metric in ("sum", "avg", "min", "max"): - value = bucket.get(metric) - if value is not None: - observed_metrics[metric] = value - - expected_metrics = { - "sum": 27.91, - "avg": 6.9775, - "min": 2.44, - "max": 9.69, - } - for metric, expected in expected_metrics.items(): - assert metric in observed_metrics - assert observed_metrics[metric] == pytest.approx(expected, abs=0.001) - - sleeps_metrics = facet_payload.get("rooms/sleepsCount", []) - assert len(sleeps_metrics) == 1 - assert sleeps_metrics[0].get("cardinality") == 1 - async def _test_autocomplete(self, client): results = await client.autocomplete(search_text="mot", suggester_name="sg") assert any(d.text == "motel" for d in results) 
diff --git a/sdk/search/azure-search-documents/tests/test_search_index_client_alias_live.py b/sdk/search/azure-search-documents/tests/test_search_index_client_alias_live.py index 96c9baa2eff9..f2e94b05baf9 100644 --- a/sdk/search/azure-search-documents/tests/test_search_index_client_alias_live.py +++ b/sdk/search/azure-search-documents/tests/test_search_index_client_alias_live.py @@ -25,6 +25,7 @@ class TestSearchClientAlias(AzureRecordedTestCase): + @pytest.mark.skip(reason="Aliases endpoint not yet deployed for 2026-04-01 GA API version") @SearchEnvVarPreparer() @search_decorator(schema="hotel_schema.json", index_batch="hotel_small.json") @recorded_by_proxy diff --git a/sdk/search/azure-search-documents/tests/test_search_index_client_alias_live_async.py b/sdk/search/azure-search-documents/tests/test_search_index_client_alias_live_async.py index 3ea3d88fd418..3dc1d1e087bf 100644 --- a/sdk/search/azure-search-documents/tests/test_search_index_client_alias_live_async.py +++ b/sdk/search/azure-search-documents/tests/test_search_index_client_alias_live_async.py @@ -26,6 +26,7 @@ class TestSearchClientAlias(AzureRecordedTestCase): + @pytest.mark.skip(reason="Aliases endpoint not yet deployed for 2026-04-01 GA API version") @SearchEnvVarPreparer() @search_decorator(schema="hotel_schema.json", index_batch="hotel_small.json") @recorded_by_proxy_async diff --git a/sdk/search/azure-search-documents/tests/test_search_index_client_live.py b/sdk/search/azure-search-documents/tests/test_search_index_client_live.py index 84fd24dc571b..5fa8f785dfb7 100644 --- a/sdk/search/azure-search-documents/tests/test_search_index_client_live.py +++ b/sdk/search/azure-search-documents/tests/test_search_index_client_live.py @@ -190,55 +190,6 @@ def _test_delete_indexes(self, client): for index in client.list_indexes(): client.delete_index(index) - @SearchEnvVarPreparer() - @recorded_by_proxy - def test_purview_enabled_index(self, search_service_endpoint, search_service_name): - del 
search_service_name # unused - endpoint = search_service_endpoint - client = SearchIndexClient(endpoint, get_credential(), retry_backoff_factor=60) - - index_name = self.get_resource_name("purview-index") - fields = [ - SearchField( - name="id", - type=SearchFieldDataType.STRING, - key=True, - filterable=True, - sortable=True, - ), - SearchField( - name="sensitivityLabel", - type=SearchFieldDataType.STRING, - filterable=True, - sensitivity_label=True, - ), - ] - index = SearchIndex(name=index_name, fields=fields, purview_enabled=True) - - created = client.create_index(index) - try: - assert created.purview_enabled is True - for field in created.fields: - if field.name == "sensitivityLabel": - assert field.sensitivity_label is True - break - else: - raise AssertionError("Expected sensitivityLabel field to be present") - - fetched = client.get_index(index_name) - assert fetched.purview_enabled is True - for field in fetched.fields: - if field.name == "sensitivityLabel": - assert field.sensitivity_label is True - break - else: - raise AssertionError("Expected sensitivityLabel field to be present") - finally: - try: - client.delete_index(index_name) - except HttpResponseError: - pass - @SearchEnvVarPreparer() @recorded_by_proxy def test_scoring_profile_product_aggregation(self, search_service_endpoint, search_service_name): diff --git a/sdk/search/azure-search-documents/tests/test_search_index_client_live_async.py b/sdk/search/azure-search-documents/tests/test_search_index_client_live_async.py index a7db4d3f4818..d7d046a37e84 100644 --- a/sdk/search/azure-search-documents/tests/test_search_index_client_live_async.py +++ b/sdk/search/azure-search-documents/tests/test_search_index_client_live_async.py @@ -195,56 +195,6 @@ async def _test_delete_indexes(self, client): async for index in result: await client.delete_index(index.name) - @SearchEnvVarPreparer() - @recorded_by_proxy_async - async def test_purview_enabled_index(self, search_service_endpoint, 
search_service_name): - del search_service_name # unused - endpoint = search_service_endpoint - client = SearchIndexClient(endpoint, get_credential(is_async=True), retry_backoff_factor=60) - - index_name = self.get_resource_name("purview-index") - fields = [ - SearchField( - name="id", - type=SearchFieldDataType.STRING, - key=True, - filterable=True, - sortable=True, - ), - SearchField( - name="sensitivityLabel", - type=SearchFieldDataType.STRING, - filterable=True, - sensitivity_label=True, - ), - ] - index = SearchIndex(name=index_name, fields=fields, purview_enabled=True) - - async with client: - created = await client.create_index(index) - try: - assert created.purview_enabled is True - for field in created.fields: - if field.name == "sensitivityLabel": - assert field.sensitivity_label is True - break - else: - raise AssertionError("Expected sensitivityLabel field to be present") - - fetched = await client.get_index(index_name) - assert fetched.purview_enabled is True - for field in fetched.fields: - if field.name == "sensitivityLabel": - assert field.sensitivity_label is True - break - else: - raise AssertionError("Expected sensitivityLabel field to be present") - finally: - try: - await client.delete_index(index_name) - except HttpResponseError: - pass - @SearchEnvVarPreparer() @recorded_by_proxy_async async def test_scoring_profile_product_aggregation(self, search_service_endpoint, search_service_name): diff --git a/sdk/search/azure-search-documents/tests/test_search_index_client_skillset_live.py b/sdk/search/azure-search-documents/tests/test_search_index_client_skillset_live.py index 5b4afeba5c51..627ea3518baf 100644 --- a/sdk/search/azure-search-documents/tests/test_search_index_client_skillset_live.py +++ b/sdk/search/azure-search-documents/tests/test_search_index_client_skillset_live.py @@ -115,7 +115,6 @@ def _test_create_skillset(self, client): assert result.skills[3].minimum_precision == 0.5 assert len(client.get_skillsets()) == 1 - 
client.reset_skills(result, [x.name for x in result.skills]) def _test_get_skillset(self, client): name = "test-ss-get" diff --git a/sdk/search/azure-search-documents/tests/test_search_index_client_skillset_live_async.py b/sdk/search/azure-search-documents/tests/test_search_index_client_skillset_live_async.py index 8829a539a3d1..f5ae897ebcaa 100644 --- a/sdk/search/azure-search-documents/tests/test_search_index_client_skillset_live_async.py +++ b/sdk/search/azure-search-documents/tests/test_search_index_client_skillset_live_async.py @@ -87,7 +87,6 @@ async def _test_create_skillset(self, client): assert result.skills[3].minimum_precision == 0.5 assert len(await client.get_skillsets()) == 1 - await client.reset_skills(result, [x.name for x in result.skills]) async def _test_get_skillset(self, client): name = "test-ss-get" diff --git a/sdk/search/azure-search-documents/tsp-location.yaml b/sdk/search/azure-search-documents/tsp-location.yaml index b843d75e9b2d..77ce3ceb4d3e 100644 --- a/sdk/search/azure-search-documents/tsp-location.yaml +++ b/sdk/search/azure-search-documents/tsp-location.yaml @@ -1,4 +1,4 @@ directory: specification/search/data-plane/Search -commit: 83e4bd86d32bf00cfab973cf24b2e10733f4d1b8 +commit: 3a1a604f7bb618bb965b4ec7bee181138e4b7767 repo: Azure/azure-rest-api-specs \ No newline at end of file From 3fcf93ec60d80e386f41696455645824f5eaf655 Mon Sep 17 00:00:00 2001 From: hizixin <19810781+hizixin@users.noreply.github.com> Date: Tue, 7 Apr 2026 13:40:43 -0700 Subject: [PATCH 2/2] Address API board review comments and bump version to 12.0.0 - Addressed API board review feedback (audience support, tombstone removal, IS_ENUM alias cleanup, SearchIndexResponse.semantic_search rename) - Version bump to 12.0.0 - Regenerated SDK from TypeSpec spec commit 098edbb979d20e35191613c25a9f4d6b381375cd - Fixed mypy/pylint errors from regen - Updated CHANGELOG Co-authored-by: Copilot <223556219+Copilot@users.noreply.github.com> --- 
.../azure-search-documents/CHANGELOG.md | 9 ++- .../apiview-properties.json | 3 - .../documents/_operations/_operations.py | 34 +++++----- .../search/documents/_operations/_patch.py | 21 +++--- .../azure/search/documents/_patch.py | 18 +++-- .../azure/search/documents/_version.py | 2 +- .../documents/aio/_operations/_operations.py | 32 +++++---- .../documents/aio/_operations/_patch.py | 13 ++-- .../azure/search/documents/aio/_patch.py | 18 +++-- .../documents/indexes/_operations/_patch.py | 2 +- .../azure/search/documents/indexes/_patch.py | 61 ++++++++++++++++- .../search/documents/indexes/aio/_patch.py | 66 ++++++++++++++++++- .../search/documents/indexes/models/_enums.py | 14 ++-- .../documents/indexes/models/_models.py | 25 +++---- .../search/documents/indexes/models/_patch.py | 61 +---------------- .../documents/knowledgebases/_client.py | 14 +++- .../knowledgebases/_configuration.py | 13 +++- .../knowledgebases/_operations/_operations.py | 30 ++------- .../search/documents/knowledgebases/_patch.py | 36 +++++++++- .../documents/knowledgebases/aio/_client.py | 12 +++- .../knowledgebases/aio/_configuration.py | 11 +++- .../aio/_operations/_operations.py | 30 ++------- .../documents/knowledgebases/aio/_patch.py | 39 ++++++++++- .../azure/search/documents/models/__init__.py | 8 +-- .../azure/search/documents/models/_enums.py | 2 +- .../azure/search/documents/models/_models.py | 4 +- .../azure/search/documents/models/_patch.py | 7 +- .../tests/test_search_client.py | 4 +- .../tests/test_search_client_async.py | 4 +- .../azure-search-documents/tsp-location.yaml | 2 +- 30 files changed, 370 insertions(+), 225 deletions(-) diff --git a/sdk/search/azure-search-documents/CHANGELOG.md b/sdk/search/azure-search-documents/CHANGELOG.md index 6789cd3ee054..6a42f58c1000 100644 --- a/sdk/search/azure-search-documents/CHANGELOG.md +++ b/sdk/search/azure-search-documents/CHANGELOG.md @@ -1,6 +1,6 @@ # Release History -## 11.7.0 (2026-04-01) +## 12.0.0 (2026-04-01) ### Features 
Added @@ -34,8 +34,9 @@ The following changes are due to the migration from AutoRest to TypeSpec code generation and affect all users: -- `SentimentSkillVersion` and `EntityRecognitionSkillVersion` are removed. Only the latest skill versions are supported. - Model `serialize` and `deserialize` methods are removed. Use `as_dict` and constructor instead. +- `EntityRecognitionSkill`, `EntityRecognitionSkillLanguage`, `EntityRecognitionSkillVersion`, `SentimentSkill`, and `SentimentSkillVersion` are removed. Only the latest skill versions (V3) are supported. +- `PathHierarchyTokenizer` is renamed to `PathHierarchyTokenizerV2`. > The following changes do not impact the API of stable versions such as 11.6.0. > Only code written against a beta version such as 11.7.0b2 may be affected. @@ -57,7 +58,6 @@ The following changes are due to the migration from AutoRest to TypeSpec code ge - `azure.search.documents.indexes.models.KnowledgeRetrievalOutputMode` - `azure.search.documents.indexes.models.PermissionFilter` - `azure.search.documents.indexes.models.SearchIndexerCache` - - `azure.search.documents.indexes.models.SearchIndexerKnowledgeStoreParameters` - `azure.search.documents.indexes.models.SearchIndexPermissionFilterOption` - `azure.search.documents.indexes.models.ServiceIndexersRuntime` - `azure.search.documents.indexes.models.SplitSkillEncoderModelName` @@ -69,6 +69,7 @@ The following changes are due to the migration from AutoRest to TypeSpec code ge - `azure.search.documents.knowledgebases.models.KnowledgeBaseModelQueryPlanningActivityRecord` - `azure.search.documents.knowledgebases.models.KnowledgeBaseRemoteSharePointReference` - `azure.search.documents.knowledgebases.models.RemoteSharePointKnowledgeSourceParams` + - `azure.search.documents.models.DebugInfo` - `azure.search.documents.models.HybridCountAndFacetMode` - `azure.search.documents.models.HybridSearch` - `azure.search.documents.models.QueryLanguage` @@ -79,6 +80,7 @@ The following changes are due to the 
migration from AutoRest to TypeSpec code ge - `azure.search.documents.models.QueryRewritesType` - `azure.search.documents.models.QueryRewritesValuesDebugInfo` - `azure.search.documents.models.QuerySpellerType` + - `azure.search.documents.models.SearchDocumentsResult` - `azure.search.documents.models.SearchScoreThreshold` - `azure.search.documents.models.SemanticDebugInfo` - `azure.search.documents.models.SemanticFieldState` @@ -171,6 +173,7 @@ The following changes are due to the migration from AutoRest to TypeSpec code ge ### Other Changes - Updated default API version to `2026-04-01`. +- Some boolean model properties (e.g., `use_query_mode`, `ignore_case`, `only_longest_match`) now default to `None` instead of `False`. There is no behavioral change — the server applies the same default when the property is omitted. ## 11.7.0b2 (2025-11-13) diff --git a/sdk/search/azure-search-documents/apiview-properties.json b/sdk/search/azure-search-documents/apiview-properties.json index 1c6e1716daff..f95c7f9f5b85 100644 --- a/sdk/search/azure-search-documents/apiview-properties.json +++ b/sdk/search/azure-search-documents/apiview-properties.json @@ -56,7 +56,6 @@ "azure.search.documents.indexes.models.DataChangeDetectionPolicy": "Search.DataChangeDetectionPolicy", "azure.search.documents.indexes.models.DataDeletionDetectionPolicy": "Search.DataDeletionDetectionPolicy", "azure.search.documents.indexes.models.DataSourceCredentials": "Search.DataSourceCredentials", - "azure.search.documents.models.DebugInfo": "Search.DebugInfo", "azure.search.documents.indexes.models.DefaultCognitiveServicesAccount": "Search.DefaultCognitiveServicesAccount", "azure.search.documents.indexes.models.DictionaryDecompounderTokenFilter": "Search.DictionaryDecompounderTokenFilter", "azure.search.documents.indexes.models.ScoringFunction": "Search.ScoringFunction", @@ -172,7 +171,6 @@ "azure.search.documents.indexes.models.ScalarQuantizationParameters": "Search.ScalarQuantizationParameters", 
"azure.search.documents.indexes.models.ScoringProfile": "Search.ScoringProfile", "azure.search.documents.indexes.models.SearchAlias": "Search.SearchAlias", - "azure.search.documents.models.SearchDocumentsResult": "Search.SearchDocumentsResult", "azure.search.documents.indexes.models.SearchField": "Search.SearchField", "azure.search.documents.indexes.models.SearchIndex": "Search.SearchIndex", "azure.search.documents.indexes.models.SearchIndexer": "Search.SearchIndexer", @@ -200,7 +198,6 @@ "azure.search.documents.indexes.models.SearchIndexKnowledgeSource": "Search.SearchIndexKnowledgeSource", "azure.search.documents.indexes.models.SearchIndexKnowledgeSourceParameters": "Search.SearchIndexKnowledgeSourceParameters", "azure.search.documents.knowledgebases.models.SearchIndexKnowledgeSourceParams": "Search.SearchIndexKnowledgeSourceParams", - "azure.search.documents.models.SearchRequest": "Search.SearchRequest", "azure.search.documents.indexes.models.SearchResourceEncryptionKey": "Search.SearchResourceEncryptionKey", "azure.search.documents.models.SearchResult": "Search.SearchResult", "azure.search.documents.indexes.models.SearchServiceCounters": "Search.SearchServiceCounters", diff --git a/sdk/search/azure-search-documents/azure/search/documents/_operations/_operations.py b/sdk/search/azure-search-documents/azure/search/documents/_operations/_operations.py index a21db33544e6..dbd5421f2a00 100644 --- a/sdk/search/azure-search-documents/azure/search/documents/_operations/_operations.py +++ b/sdk/search/azure-search-documents/azure/search/documents/_operations/_operations.py @@ -357,7 +357,7 @@ def build_search_autocomplete_get_request( *, search_text: str, suggester_name: str, - autocomplete_mode: Optional[Union[str, _models1._enums.AutocompleteMode]] = None, + autocomplete_mode: Optional[Union[str, _models1.AutocompleteMode]] = None, filter: Optional[str] = None, use_fuzzy_matching: Optional[bool] = None, highlight_post_tag: Optional[str] = None, @@ -538,7 +538,7 @@ def 
_search_get( # pylint: disable=too-many-locals semantic_query: Optional[str] = None, debug: Optional[Union[str, _models1.QueryDebugMode]] = None, **kwargs: Any - ) -> _models1.SearchDocumentsResult: + ) -> _models1._models.SearchDocumentsResult: """Searches for documents in the index. :keyword search_text: A full-text search query expression; Use "*" or omit this parameter to @@ -663,7 +663,7 @@ def _search_get( # pylint: disable=too-many-locals "all". Default value is None. :paramtype debug: str or ~azure.search.documents.models.QueryDebugMode :return: SearchDocumentsResult. The SearchDocumentsResult is compatible with MutableMapping - :rtype: ~azure.search.documents.models.SearchDocumentsResult + :rtype: ~azure.search.documents.models._models.SearchDocumentsResult :raises ~azure.core.exceptions.HttpResponseError: """ error_map: MutableMapping = { @@ -677,7 +677,7 @@ def _search_get( # pylint: disable=too-many-locals _headers = kwargs.pop("headers", {}) or {} _params = kwargs.pop("params", {}) or {} - cls: ClsType[_models1.SearchDocumentsResult] = kwargs.pop("cls", None) + cls: ClsType[_models1._models.SearchDocumentsResult] = kwargs.pop("cls", None) _request = build_search_search_get_request( index_name=self._config.index_name, @@ -740,7 +740,9 @@ def _search_get( # pylint: disable=too-many-locals if _stream: deserialized = response.iter_bytes() if _decompress else response.iter_raw() else: - deserialized = _deserialize(_models1.SearchDocumentsResult, response.json()) + deserialized = _deserialize( + _models1._models.SearchDocumentsResult, response.json() # pylint: disable=protected-access + ) if cls: return cls(pipeline_response, deserialized, {}) # type: ignore @@ -781,15 +783,15 @@ def _search_post( # pylint: disable=too-many-locals vector_queries: Optional[list[_models1.VectorQuery]] = None, vector_filter_mode: Optional[Union[str, _models1.VectorFilterMode]] = None, **kwargs: Any - ) -> _models1.SearchDocumentsResult: ... 
+ ) -> _models1._models.SearchDocumentsResult: ... @overload def _search_post( self, body: JSON, *, content_type: str = "application/json", **kwargs: Any - ) -> _models1.SearchDocumentsResult: ... + ) -> _models1._models.SearchDocumentsResult: ... @overload def _search_post( self, body: IO[bytes], *, content_type: str = "application/json", **kwargs: Any - ) -> _models1.SearchDocumentsResult: ... + ) -> _models1._models.SearchDocumentsResult: ... @distributed_trace def _search_post( # pylint: disable=too-many-locals @@ -825,7 +827,7 @@ def _search_post( # pylint: disable=too-many-locals vector_queries: Optional[list[_models1.VectorQuery]] = None, vector_filter_mode: Optional[Union[str, _models1.VectorFilterMode]] = None, **kwargs: Any - ) -> _models1.SearchDocumentsResult: + ) -> _models1._models.SearchDocumentsResult: """Searches for documents in the index. :param body: Is either a JSON type or a IO[bytes] type. Required. @@ -946,7 +948,7 @@ def _search_post( # pylint: disable=too-many-locals "postFilter", "preFilter", and "strictPostFilter". Default value is None. :paramtype vector_filter_mode: str or ~azure.search.documents.models.VectorFilterMode :return: SearchDocumentsResult. 
The SearchDocumentsResult is compatible with MutableMapping - :rtype: ~azure.search.documents.models.SearchDocumentsResult + :rtype: ~azure.search.documents.models._models.SearchDocumentsResult :raises ~azure.core.exceptions.HttpResponseError: """ error_map: MutableMapping = { @@ -961,7 +963,7 @@ def _search_post( # pylint: disable=too-many-locals _params = kwargs.pop("params", {}) or {} content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None)) - cls: ClsType[_models1.SearchDocumentsResult] = kwargs.pop("cls", None) + cls: ClsType[_models1._models.SearchDocumentsResult] = kwargs.pop("cls", None) if body is _Unset: body = { @@ -1039,7 +1041,9 @@ def _search_post( # pylint: disable=too-many-locals if _stream: deserialized = response.iter_bytes() if _decompress else response.iter_raw() else: - deserialized = _deserialize(_models1.SearchDocumentsResult, response.json()) + deserialized = _deserialize( + _models1._models.SearchDocumentsResult, response.json() # pylint: disable=protected-access + ) if cls: return cls(pipeline_response, deserialized, {}) # type: ignore @@ -1535,7 +1539,7 @@ def _autocomplete_get( *, search_text: str, suggester_name: str, - autocomplete_mode: Optional[Union[str, _models1._enums.AutocompleteMode]] = None, + autocomplete_mode: Optional[Union[str, _models1.AutocompleteMode]] = None, filter: Optional[str] = None, use_fuzzy_matching: Optional[bool] = None, highlight_post_tag: Optional[str] = None, @@ -1661,7 +1665,7 @@ def _autocomplete_post( search_text: str, suggester_name: str, content_type: str = "application/json", - autocomplete_mode: Optional[Union[str, _models1._enums.AutocompleteMode]] = None, + autocomplete_mode: Optional[Union[str, _models1.AutocompleteMode]] = None, filter: Optional[str] = None, use_fuzzy_matching: Optional[bool] = None, highlight_post_tag: Optional[str] = None, @@ -1687,7 +1691,7 @@ def _autocomplete_post( # pylint: disable=too-many-locals *, search_text: str = _Unset, 
suggester_name: str = _Unset, - autocomplete_mode: Optional[Union[str, _models1._enums.AutocompleteMode]] = None, + autocomplete_mode: Optional[Union[str, _models1.AutocompleteMode]] = None, filter: Optional[str] = None, use_fuzzy_matching: Optional[bool] = None, highlight_post_tag: Optional[str] = None, diff --git a/sdk/search/azure-search-documents/azure/search/documents/_operations/_patch.py b/sdk/search/azure-search-documents/azure/search/documents/_operations/_patch.py index 7bcab5f499ca..628843b22425 100644 --- a/sdk/search/azure-search-documents/azure/search/documents/_operations/_patch.py +++ b/sdk/search/azure-search-documents/azure/search/documents/_operations/_patch.py @@ -19,6 +19,7 @@ from ._operations import _SearchClientOperationsMixin as _SearchClientOperationsMixinGenerated from ..models._patch import RequestEntityTooLargeError from .. import models as _models +from ..models._models import SearchDocumentsResult, SearchRequest def _convert_search_result(result: _models.SearchResult) -> Dict[str, Any]: @@ -38,7 +39,7 @@ def _convert_search_result(result: _models.SearchResult) -> Dict[str, Any]: return ret -def _pack_continuation_token(response: _models.SearchDocumentsResult, api_version: str) -> Optional[bytes]: +def _pack_continuation_token(response: SearchDocumentsResult, api_version: str) -> Optional[bytes]: """Pack continuation token from search response. :param ~azure.search.documents.models.SearchDocumentsResult response: The search response. :param str api_version: The API version used in the request. 
@@ -64,7 +65,7 @@ def _unpack_continuation_token(token: bytes) -> tuple: unpacked_token = json.loads(base64.b64decode(token)) next_link = unpacked_token["nextLink"] next_page_parameters = unpacked_token["nextPageParameters"] - next_page_request = _models.SearchRequest._deserialize(next_page_parameters, []) # pylint: disable=protected-access + next_page_request = SearchRequest._deserialize(next_page_parameters, []) # pylint: disable=protected-access return next_link, next_page_request @@ -120,7 +121,7 @@ def _build_search_request( semantic_error_mode: Optional[Union[str, _models.SemanticErrorMode]] = None, semantic_max_wait_in_milliseconds: Optional[int] = None, debug: Optional[Union[str, _models.QueryDebugMode]] = None, -) -> _models.SearchRequest: +) -> SearchRequest: # pylint:disable=too-many-locals """Build a SearchRequest from search parameters. @@ -187,7 +188,7 @@ def _build_search_request( highlight_fields_list = [f.strip() for f in highlight_fields.split(",") if f.strip()] # Build and return the search request - return _models.SearchRequest( # type: ignore[misc] + return SearchRequest( # type: ignore[misc] search_text=search_text, include_total_count=include_total_count, facets=facets, @@ -222,7 +223,7 @@ def _build_search_request( class SearchPageIterator(PageIterator): """An iterator over search result pages.""" - def __init__(self, client, initial_request: _models.SearchRequest, kwargs, continuation_token=None) -> None: + def __init__(self, client, initial_request: SearchRequest, kwargs, continuation_token=None) -> None: super(SearchPageIterator, self).__init__( get_next=self._get_next_cb, extract_data=self._extract_data_cb, @@ -243,7 +244,7 @@ def _get_next_cb(self, continuation_token): _next_link, next_page_request = _unpack_continuation_token(continuation_token) return self._client._search_post(body=next_page_request, **self._kwargs) # pylint:disable=protected-access - def _extract_data_cb(self, response: _models.SearchDocumentsResult): + def 
_extract_data_cb(self, response: SearchDocumentsResult): continuation_token = _pack_continuation_token(response, api_version=self._api_version) results = [_convert_search_result(r) for r in response.results] return continuation_token, results @@ -251,7 +252,7 @@ def _extract_data_cb(self, response: _models.SearchDocumentsResult): @_ensure_response def get_facets(self) -> Optional[Dict[str, Any]]: self.continuation_token = None - response = cast(_models.SearchDocumentsResult, self._response) + response = cast(SearchDocumentsResult, self._response) if response.facets is not None and self._facets is None: self._facets = { k: [x.as_dict() if hasattr(x, "as_dict") else dict(x) for x in v] for k, v in response.facets.items() @@ -261,19 +262,19 @@ def get_facets(self) -> Optional[Dict[str, Any]]: @_ensure_response def get_coverage(self) -> Optional[float]: self.continuation_token = None - response = cast(_models.SearchDocumentsResult, self._response) + response = cast(SearchDocumentsResult, self._response) return response.coverage @_ensure_response def get_count(self) -> Optional[int]: self.continuation_token = None - response = cast(_models.SearchDocumentsResult, self._response) + response = cast(SearchDocumentsResult, self._response) return response.count @_ensure_response def get_answers(self) -> Optional[List[_models.QueryAnswerResult]]: self.continuation_token = None - response = cast(_models.SearchDocumentsResult, self._response) + response = cast(SearchDocumentsResult, self._response) return cast(Optional[List[_models.QueryAnswerResult]], response.answers) diff --git a/sdk/search/azure-search-documents/azure/search/documents/_patch.py b/sdk/search/azure-search-documents/azure/search/documents/_patch.py index 391b87a4a8ff..9dcbc461b0a0 100644 --- a/sdk/search/azure-search-documents/azure/search/documents/_patch.py +++ b/sdk/search/azure-search-documents/azure/search/documents/_patch.py @@ -49,23 +49,29 @@ class ApiVersion(str, Enum, 
metaclass=CaseInsensitiveEnumMeta): class SearchClient(_SearchClient): """SearchClient. - :param endpoint: Service host. Required. + :param endpoint: The endpoint URL of the search service. Required. :type endpoint: str :param credential: Credential used to authenticate requests to the service. Is either a key - credential type or a token credential type. Required. + credential type or a token credential type. Required. :type credential: ~azure.core.credentials.AzureKeyCredential or - ~azure.core.credentials.TokenCredential + ~azure.core.credentials.TokenCredential :param index_name: The name of the index. Required. :type index_name: str - :keyword api_version: The API version to use for this operation. Default value is - "2026-04-01". Note that overriding this default value may result in unsupported - behavior. + :keyword api_version: The API version to use for this operation. Known values are "2026-04-01" + and None. Default value is "2026-04-01". Note that overriding this default value may result in + unsupported behavior. :paramtype api_version: str + :keyword str audience: Sets the Audience to use for authentication with Microsoft Entra ID. The + audience is not considered when using a shared key. If audience is not provided, the public cloud + audience will be assumed. 
""" def __init__( self, endpoint: str, index_name: str, credential: Union[AzureKeyCredential, TokenCredential], **kwargs: Any ) -> None: + audience = kwargs.pop("audience", None) + if audience: + kwargs.setdefault("credential_scopes", [audience.rstrip("/") + "/.default"]) super().__init__(endpoint=endpoint, credential=credential, index_name=index_name, **kwargs) diff --git a/sdk/search/azure-search-documents/azure/search/documents/_version.py b/sdk/search/azure-search-documents/azure/search/documents/_version.py index 6d71262c832f..22c33ec5bb72 100644 --- a/sdk/search/azure-search-documents/azure/search/documents/_version.py +++ b/sdk/search/azure-search-documents/azure/search/documents/_version.py @@ -3,6 +3,6 @@ # Licensed under the MIT License. # ------------------------------------ -VERSION = "11.7.0" # type: str +VERSION = "12.0.0" # type: str SDK_MONIKER = "search-documents/{}".format(VERSION) # type: str diff --git a/sdk/search/azure-search-documents/azure/search/documents/aio/_operations/_operations.py b/sdk/search/azure-search-documents/azure/search/documents/aio/_operations/_operations.py index 1be2fefa06f9..adf0bea3c777 100644 --- a/sdk/search/azure-search-documents/azure/search/documents/aio/_operations/_operations.py +++ b/sdk/search/azure-search-documents/azure/search/documents/aio/_operations/_operations.py @@ -150,7 +150,7 @@ async def _search_get( # pylint: disable=too-many-locals semantic_query: Optional[str] = None, debug: Optional[Union[str, _models2.QueryDebugMode]] = None, **kwargs: Any - ) -> _models2.SearchDocumentsResult: + ) -> _models2._models.SearchDocumentsResult: """Searches for documents in the index. :keyword search_text: A full-text search query expression; Use "*" or omit this parameter to @@ -275,7 +275,7 @@ async def _search_get( # pylint: disable=too-many-locals "all". Default value is None. :paramtype debug: str or ~azure.search.documents.models.QueryDebugMode :return: SearchDocumentsResult. 
The SearchDocumentsResult is compatible with MutableMapping - :rtype: ~azure.search.documents.models.SearchDocumentsResult + :rtype: ~azure.search.documents.models._models.SearchDocumentsResult :raises ~azure.core.exceptions.HttpResponseError: """ error_map: MutableMapping = { @@ -289,7 +289,7 @@ async def _search_get( # pylint: disable=too-many-locals _headers = kwargs.pop("headers", {}) or {} _params = kwargs.pop("params", {}) or {} - cls: ClsType[_models2.SearchDocumentsResult] = kwargs.pop("cls", None) + cls: ClsType[_models2._models.SearchDocumentsResult] = kwargs.pop("cls", None) _request = build_search_search_get_request( index_name=self._config.index_name, @@ -352,7 +352,9 @@ async def _search_get( # pylint: disable=too-many-locals if _stream: deserialized = response.iter_bytes() if _decompress else response.iter_raw() else: - deserialized = _deserialize(_models2.SearchDocumentsResult, response.json()) + deserialized = _deserialize( + _models2._models.SearchDocumentsResult, response.json() # pylint: disable=protected-access + ) if cls: return cls(pipeline_response, deserialized, {}) # type: ignore @@ -393,15 +395,15 @@ async def _search_post( # pylint: disable=too-many-locals vector_queries: Optional[list[_models2.VectorQuery]] = None, vector_filter_mode: Optional[Union[str, _models2.VectorFilterMode]] = None, **kwargs: Any - ) -> _models2.SearchDocumentsResult: ... + ) -> _models2._models.SearchDocumentsResult: ... @overload async def _search_post( self, body: JSON, *, content_type: str = "application/json", **kwargs: Any - ) -> _models2.SearchDocumentsResult: ... + ) -> _models2._models.SearchDocumentsResult: ... @overload async def _search_post( self, body: IO[bytes], *, content_type: str = "application/json", **kwargs: Any - ) -> _models2.SearchDocumentsResult: ... + ) -> _models2._models.SearchDocumentsResult: ... 
@distributed_trace_async async def _search_post( # pylint: disable=too-many-locals @@ -437,7 +439,7 @@ async def _search_post( # pylint: disable=too-many-locals vector_queries: Optional[list[_models2.VectorQuery]] = None, vector_filter_mode: Optional[Union[str, _models2.VectorFilterMode]] = None, **kwargs: Any - ) -> _models2.SearchDocumentsResult: + ) -> _models2._models.SearchDocumentsResult: """Searches for documents in the index. :param body: Is either a JSON type or a IO[bytes] type. Required. @@ -558,7 +560,7 @@ async def _search_post( # pylint: disable=too-many-locals "postFilter", "preFilter", and "strictPostFilter". Default value is None. :paramtype vector_filter_mode: str or ~azure.search.documents.models.VectorFilterMode :return: SearchDocumentsResult. The SearchDocumentsResult is compatible with MutableMapping - :rtype: ~azure.search.documents.models.SearchDocumentsResult + :rtype: ~azure.search.documents.models._models.SearchDocumentsResult :raises ~azure.core.exceptions.HttpResponseError: """ error_map: MutableMapping = { @@ -573,7 +575,7 @@ async def _search_post( # pylint: disable=too-many-locals _params = kwargs.pop("params", {}) or {} content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None)) - cls: ClsType[_models2.SearchDocumentsResult] = kwargs.pop("cls", None) + cls: ClsType[_models2._models.SearchDocumentsResult] = kwargs.pop("cls", None) if body is _Unset: body = { @@ -651,7 +653,9 @@ async def _search_post( # pylint: disable=too-many-locals if _stream: deserialized = response.iter_bytes() if _decompress else response.iter_raw() else: - deserialized = _deserialize(_models2.SearchDocumentsResult, response.json()) + deserialized = _deserialize( + _models2._models.SearchDocumentsResult, response.json() # pylint: disable=protected-access + ) if cls: return cls(pipeline_response, deserialized, {}) # type: ignore @@ -1147,7 +1151,7 @@ async def _autocomplete_get( *, search_text: str, suggester_name: str, - 
autocomplete_mode: Optional[Union[str, _models2._enums.AutocompleteMode]] = None, + autocomplete_mode: Optional[Union[str, _models2.AutocompleteMode]] = None, filter: Optional[str] = None, use_fuzzy_matching: Optional[bool] = None, highlight_post_tag: Optional[str] = None, @@ -1273,7 +1277,7 @@ async def _autocomplete_post( search_text: str, suggester_name: str, content_type: str = "application/json", - autocomplete_mode: Optional[Union[str, _models2._enums.AutocompleteMode]] = None, + autocomplete_mode: Optional[Union[str, _models2.AutocompleteMode]] = None, filter: Optional[str] = None, use_fuzzy_matching: Optional[bool] = None, highlight_post_tag: Optional[str] = None, @@ -1299,7 +1303,7 @@ async def _autocomplete_post( # pylint: disable=too-many-locals *, search_text: str = _Unset, suggester_name: str = _Unset, - autocomplete_mode: Optional[Union[str, _models2._enums.AutocompleteMode]] = None, + autocomplete_mode: Optional[Union[str, _models2.AutocompleteMode]] = None, filter: Optional[str] = None, use_fuzzy_matching: Optional[bool] = None, highlight_post_tag: Optional[str] = None, diff --git a/sdk/search/azure-search-documents/azure/search/documents/aio/_operations/_patch.py b/sdk/search/azure-search-documents/azure/search/documents/aio/_operations/_patch.py index e4352e3e6121..caa5d5dab238 100644 --- a/sdk/search/azure-search-documents/azure/search/documents/aio/_operations/_patch.py +++ b/sdk/search/azure-search-documents/azure/search/documents/aio/_operations/_patch.py @@ -21,6 +21,7 @@ ) from ...models._patch import RequestEntityTooLargeError from ... 
import models as _models +from ...models._models import SearchDocumentsResult, SearchRequest def _ensure_response(f): @@ -44,7 +45,7 @@ async def wrapper(self, *args, **kw): class AsyncSearchPageIterator(AsyncPageIterator): """An async iterator over search result pages.""" - def __init__(self, client, initial_request: _models.SearchRequest, kwargs, continuation_token=None) -> None: + def __init__(self, client, initial_request: SearchRequest, kwargs, continuation_token=None) -> None: super(AsyncSearchPageIterator, self).__init__( get_next=self._get_next_cb, extract_data=self._extract_data_cb, @@ -67,7 +68,7 @@ async def _get_next_cb(self, continuation_token): body=next_page_request, **self._kwargs ) - async def _extract_data_cb(self, response: _models.SearchDocumentsResult): + async def _extract_data_cb(self, response: SearchDocumentsResult): continuation_token = _pack_continuation_token(response, api_version=self._api_version) results = [_convert_search_result(r) for r in response.results] return continuation_token, results @@ -75,7 +76,7 @@ async def _extract_data_cb(self, response: _models.SearchDocumentsResult): @_ensure_response async def get_facets(self) -> Optional[Dict[str, Any]]: self.continuation_token = None - response = cast(_models.SearchDocumentsResult, self._response) + response = cast(SearchDocumentsResult, self._response) if response.facets is not None and self._facets is None: self._facets = { k: [x.as_dict() if hasattr(x, "as_dict") else dict(x) for x in v] for k, v in response.facets.items() @@ -85,19 +86,19 @@ async def get_facets(self) -> Optional[Dict[str, Any]]: @_ensure_response async def get_coverage(self) -> Optional[float]: self.continuation_token = None - response = cast(_models.SearchDocumentsResult, self._response) + response = cast(SearchDocumentsResult, self._response) return response.coverage @_ensure_response async def get_count(self) -> Optional[int]: self.continuation_token = None - response = cast(_models.SearchDocumentsResult, 
self._response) + response = cast(SearchDocumentsResult, self._response) return response.count @_ensure_response async def get_answers(self) -> Optional[List[_models.QueryAnswerResult]]: self.continuation_token = None - response = cast(_models.SearchDocumentsResult, self._response) + response = cast(SearchDocumentsResult, self._response) return response.answers diff --git a/sdk/search/azure-search-documents/azure/search/documents/aio/_patch.py b/sdk/search/azure-search-documents/azure/search/documents/aio/_patch.py index 1947a709dd72..39284a84d0f1 100644 --- a/sdk/search/azure-search-documents/azure/search/documents/aio/_patch.py +++ b/sdk/search/azure-search-documents/azure/search/documents/aio/_patch.py @@ -27,23 +27,29 @@ class SearchClient(_SearchClient): """SearchClient. - :param endpoint: Service host. Required. + :param endpoint: The endpoint URL of the search service. Required. :type endpoint: str :param credential: Credential used to authenticate requests to the service. Is either a key - credential type or a token credential type. Required. + credential type or a token credential type. Required. :type credential: ~azure.core.credentials.AzureKeyCredential or - ~azure.core.credentials_async.AsyncTokenCredential + ~azure.core.credentials_async.AsyncTokenCredential :param index_name: The name of the index. Required. :type index_name: str - :keyword api_version: The API version to use for this operation. Default value is - "2026-04-01". Note that overriding this default value may result in unsupported - behavior. + :keyword api_version: The API version to use for this operation. Known values are "2026-04-01" + and None. Default value is "2026-04-01". Note that overriding this default value may result in + unsupported behavior. :paramtype api_version: str + :keyword str audience: Sets the Audience to use for authentication with Microsoft Entra ID. The + audience is not considered when using a shared key. 
If audience is not provided, the public cloud + audience will be assumed. """ def __init__( self, endpoint: str, index_name: str, credential: Union[AzureKeyCredential, AsyncTokenCredential], **kwargs: Any ) -> None: + audience = kwargs.pop("audience", None) + if audience: + kwargs.setdefault("credential_scopes", [audience.rstrip("/") + "/.default"]) super().__init__(endpoint=endpoint, credential=credential, index_name=index_name, **kwargs) diff --git a/sdk/search/azure-search-documents/azure/search/documents/indexes/_operations/_patch.py b/sdk/search/azure-search-documents/azure/search/documents/indexes/_operations/_patch.py index 5ec918657b5d..b34ab6b34db2 100644 --- a/sdk/search/azure-search-documents/azure/search/documents/indexes/_operations/_patch.py +++ b/sdk/search/azure-search-documents/azure/search/documents/indexes/_operations/_patch.py @@ -48,7 +48,7 @@ def _convert_index_response(response: _SearchIndexResponse) -> _models.SearchInd normalizers=response.normalizers, encryption_key=response.encryption_key, similarity=response.similarity, - semantic_search=response.semantic, + semantic_search=response.semantic_search, vector_search=response.vector_search, e_tag=response.e_tag, ) diff --git a/sdk/search/azure-search-documents/azure/search/documents/indexes/_patch.py b/sdk/search/azure-search-documents/azure/search/documents/indexes/_patch.py index 87676c65a8f0..0d7690d7a688 100644 --- a/sdk/search/azure-search-documents/azure/search/documents/indexes/_patch.py +++ b/sdk/search/azure-search-documents/azure/search/documents/indexes/_patch.py @@ -7,9 +7,68 @@ Follow our quickstart for examples: https://aka.ms/azsdk/python/dpcodegen/python/customize """ +from typing import Any, Union +from azure.core.credentials import AzureKeyCredential, TokenCredential -__all__: list[str] = [] # Add all objects you want publicly available to users at this package level +from ._client import SearchIndexClient as _SearchIndexClient +from ._client import SearchIndexerClient as 
_SearchIndexerClient + + +class SearchIndexClient(_SearchIndexClient): + """SearchIndexClient. + + :param endpoint: The endpoint URL of the search service. Required. + :type endpoint: str + :param credential: Credential used to authenticate requests to the service. Is either a key + credential type or a token credential type. Required. + :type credential: ~azure.core.credentials.AzureKeyCredential or + ~azure.core.credentials.TokenCredential + :keyword api_version: The API version to use for this operation. Known values are "2026-04-01" + and None. Default value is "2026-04-01". Note that overriding this default value may result in + unsupported behavior. + :paramtype api_version: str + :keyword str audience: Sets the Audience to use for authentication with Microsoft Entra ID. The + audience is not considered when using a shared key. If audience is not provided, the public cloud + audience will be assumed. + """ + + def __init__(self, endpoint: str, credential: Union[AzureKeyCredential, TokenCredential], **kwargs: Any) -> None: + audience = kwargs.pop("audience", None) + if audience: + kwargs.setdefault("credential_scopes", [audience.rstrip("/") + "/.default"]) + super().__init__(endpoint=endpoint, credential=credential, **kwargs) + + +class SearchIndexerClient(_SearchIndexerClient): + """SearchIndexerClient. + + :param endpoint: The endpoint URL of the search service. Required. + :type endpoint: str + :param credential: Credential used to authenticate requests to the service. Is either a key + credential type or a token credential type. Required. + :type credential: ~azure.core.credentials.AzureKeyCredential or + ~azure.core.credentials.TokenCredential + :keyword api_version: The API version to use for this operation. Known values are "2026-04-01" + and None. Default value is "2026-04-01". Note that overriding this default value may result in + unsupported behavior. 
+ :paramtype api_version: str + :keyword str audience: Sets the Audience to use for authentication with Microsoft Entra ID. The + audience is not considered when using a shared key. If audience is not provided, the public cloud + audience will be assumed. + """ + + def __init__(self, endpoint: str, credential: Union[AzureKeyCredential, TokenCredential], **kwargs: Any) -> None: + audience = kwargs.pop("audience", None) + if audience: + kwargs.setdefault("credential_scopes", [audience.rstrip("/") + "/.default"]) + super().__init__(endpoint=endpoint, credential=credential, **kwargs) + + +__all__: list[str] = [ + "SearchIndexClient", + "SearchIndexerClient", +] def patch_sdk(): diff --git a/sdk/search/azure-search-documents/azure/search/documents/indexes/aio/_patch.py b/sdk/search/azure-search-documents/azure/search/documents/indexes/aio/_patch.py index 87676c65a8f0..0388b8323482 100644 --- a/sdk/search/azure-search-documents/azure/search/documents/indexes/aio/_patch.py +++ b/sdk/search/azure-search-documents/azure/search/documents/indexes/aio/_patch.py @@ -7,9 +7,73 @@ Follow our quickstart for examples: https://aka.ms/azsdk/python/dpcodegen/python/customize """ +from typing import Any, Union +from azure.core.credentials import AzureKeyCredential +from azure.core.credentials_async import AsyncTokenCredential -__all__: list[str] = [] # Add all objects you want publicly available to users at this package level +from ._client import SearchIndexClient as _SearchIndexClient +from ._client import SearchIndexerClient as _SearchIndexerClient + + +class SearchIndexClient(_SearchIndexClient): + """SearchIndexClient. + + :param endpoint: The endpoint URL of the search service. Required. + :type endpoint: str + :param credential: Credential used to authenticate requests to the service. Is either a key + credential type or a token credential type. Required. 
+ :type credential: ~azure.core.credentials.AzureKeyCredential or + ~azure.core.credentials_async.AsyncTokenCredential + :keyword api_version: The API version to use for this operation. Known values are "2026-04-01" + and None. Default value is "2026-04-01". Note that overriding this default value may result in + unsupported behavior. + :paramtype api_version: str + :keyword str audience: Sets the Audience to use for authentication with Microsoft Entra ID. The + audience is not considered when using a shared key. If audience is not provided, the public cloud + audience will be assumed. + """ + + def __init__( + self, endpoint: str, credential: Union[AzureKeyCredential, AsyncTokenCredential], **kwargs: Any + ) -> None: + audience = kwargs.pop("audience", None) + if audience: + kwargs.setdefault("credential_scopes", [audience.rstrip("/") + "/.default"]) + super().__init__(endpoint=endpoint, credential=credential, **kwargs) + + +class SearchIndexerClient(_SearchIndexerClient): + """SearchIndexerClient. + + :param endpoint: The endpoint URL of the search service. Required. + :type endpoint: str + :param credential: Credential used to authenticate requests to the service. Is either a key + credential type or a token credential type. Required. + :type credential: ~azure.core.credentials.AzureKeyCredential or + ~azure.core.credentials_async.AsyncTokenCredential + :keyword api_version: The API version to use for this operation. Known values are "2026-04-01" + and None. Default value is "2026-04-01". Note that overriding this default value may result in + unsupported behavior. + :paramtype api_version: str + :keyword str audience: Sets the Audience to use for authentication with Microsoft Entra ID. The + audience is not considered when using a shared key. If audience is not provided, the public cloud + audience will be assumed. 
+ """ + + def __init__( + self, endpoint: str, credential: Union[AzureKeyCredential, AsyncTokenCredential], **kwargs: Any + ) -> None: + audience = kwargs.pop("audience", None) + if audience: + kwargs.setdefault("credential_scopes", [audience.rstrip("/") + "/.default"]) + super().__init__(endpoint=endpoint, credential=credential, **kwargs) + + +__all__: list[str] = [ + "SearchIndexClient", + "SearchIndexerClient", +] def patch_sdk(): diff --git a/sdk/search/azure-search-documents/azure/search/documents/indexes/models/_enums.py b/sdk/search/azure-search-documents/azure/search/documents/indexes/models/_enums.py index 194d9f33a598..6e79d65cc75d 100644 --- a/sdk/search/azure-search-documents/azure/search/documents/indexes/models/_enums.py +++ b/sdk/search/azure-search-documents/azure/search/documents/indexes/models/_enums.py @@ -16,13 +16,11 @@ class AIFoundryModelCatalogName(str, Enum, metaclass=CaseInsensitiveEnumMeta): OPEN_AI_CLIP_IMAGE_TEXT_EMBEDDINGS_VIT_BASE_PATCH32 = "OpenAI-CLIP-Image-Text-Embeddings-vit-base-patch32" """OpenAI-CLIP-Image-Text-Embeddings-vit-base-patch32.""" - OPEN_AI_CLIP_IMAGE_TEXT_EMBEDDINGS_VI_T_LARGE_PATCH14_336 = ( - "OpenAI-CLIP-Image-Text-Embeddings-ViT-Large-Patch14-336" - ) + OPEN_AI_CLIP_IMAGE_TEXT_EMBEDDINGS_VIT_LARGE_PATCH14_336 = "OpenAI-CLIP-Image-Text-Embeddings-ViT-Large-Patch14-336" """OpenAI-CLIP-Image-Text-Embeddings-ViT-Large-Patch14-336.""" - FACEBOOK_DINO_V2_IMAGE_EMBEDDINGS_VI_T_BASE = "Facebook-DinoV2-Image-Embeddings-ViT-Base" + FACEBOOK_DINO_V2_IMAGE_EMBEDDINGS_VIT_BASE = "Facebook-DinoV2-Image-Embeddings-ViT-Base" """Facebook-DinoV2-Image-Embeddings-ViT-Base.""" - FACEBOOK_DINO_V2_IMAGE_EMBEDDINGS_VI_T_GIANT = "Facebook-DinoV2-Image-Embeddings-ViT-Giant" + FACEBOOK_DINO_V2_IMAGE_EMBEDDINGS_VIT_GIANT = "Facebook-DinoV2-Image-Embeddings-ViT-Giant" """Facebook-DinoV2-Image-Embeddings-ViT-Giant.""" COHERE_EMBED_V3_ENGLISH = "Cohere-embed-v3-english" """Cohere-embed-v3-english.""" @@ -1252,7 +1250,7 @@ class 
OcrSkillLanguage(str, Enum, metaclass=CaseInsensitiveEnumMeta): """Ho (Devanagiri).""" HU = "hu" """Hungarian.""" - IS_ENUM = "is" + IS = "is" """Icelandic.""" SMN = "smn" """Inari Sami.""" @@ -1753,7 +1751,7 @@ class SplitSkillLanguage(str, Enum, metaclass=CaseInsensitiveEnumMeta): """Hungarian.""" ID = "id" """Indonesian.""" - IS_ENUM = "is" + IS = "is" """Icelandic.""" IT = "it" """Italian.""" @@ -2036,7 +2034,7 @@ class TextTranslationSkillLanguage(str, Enum, metaclass=CaseInsensitiveEnumMeta) """Hmong Daw.""" HU = "hu" """Hungarian.""" - IS_ENUM = "is" + IS = "is" """Icelandic.""" ID = "id" """Indonesian.""" diff --git a/sdk/search/azure-search-documents/azure/search/documents/indexes/models/_models.py b/sdk/search/azure-search-documents/azure/search/documents/indexes/models/_models.py index 4683af974c89..7e846f38e912 100644 --- a/sdk/search/azure-search-documents/azure/search/documents/indexes/models/_models.py +++ b/sdk/search/azure-search-documents/azure/search/documents/indexes/models/_models.py @@ -2191,12 +2191,12 @@ class CustomAnalyzer(LexicalAnalyzer, discriminator="#Microsoft.Azure.Search.Cus underscores, can only start and end with alphanumeric characters, and is limited to 128 characters. Required. :vartype name: str - :ivar tokenizer: The name of the tokenizer to use to divide continuous text into a sequence of - tokens, such as breaking a sentence into words. Required. Known values are: "classic", - "edgeNGram", "keyword_v2", "letter", "lowercase", "microsoft_language_tokenizer", + :ivar tokenizer_name: The name of the tokenizer to use to divide continuous text into a + sequence of tokens, such as breaking a sentence into words. Required. Known values are: + "classic", "edgeNGram", "keyword_v2", "letter", "lowercase", "microsoft_language_tokenizer", "microsoft_language_stemming_tokenizer", "nGram", "path_hierarchy_v2", "pattern", "standard_v2", "uax_url_email", and "whitespace". 
- :vartype tokenizer: str or ~azure.search.documents.indexes.models.LexicalTokenizerName + :vartype tokenizer_name: str or ~azure.search.documents.indexes.models.LexicalTokenizerName :ivar token_filters: A list of token filters used to filter out or modify the tokens generated by a tokenizer. For example, you can specify a lowercase filter that converts all characters to lowercase. The filters are run in the order in which they are listed. @@ -2210,8 +2210,8 @@ class CustomAnalyzer(LexicalAnalyzer, discriminator="#Microsoft.Azure.Search.Cus :vartype odata_type: str """ - tokenizer: Union[str, "_models.LexicalTokenizerName"] = rest_field( - visibility=["read", "create", "update", "delete", "query"] + tokenizer_name: Union[str, "_models.LexicalTokenizerName"] = rest_field( + name="tokenizer", visibility=["read", "create", "update", "delete", "query"] ) """The name of the tokenizer to use to divide continuous text into a sequence of tokens, such as breaking a sentence into words. Required. Known values are: \"classic\", \"edgeNGram\", @@ -2239,7 +2239,7 @@ def __init__( self, *, name: str, - tokenizer: Union[str, "_models.LexicalTokenizerName"], + tokenizer_name: Union[str, "_models.LexicalTokenizerName"], token_filters: Optional[list[Union[str, "_models.TokenFilterName"]]] = None, char_filters: Optional[list[Union[str, "_models.CharFilterName"]]] = None, ) -> None: ... @@ -8768,8 +8768,9 @@ class SearchIndexResponse(_Model): creation time and cannot be modified on existing indexes. If null, the ClassicSimilarity algorithm is used. :vartype similarity: ~azure.search.documents.indexes.models.SimilarityAlgorithm - :ivar semantic: Defines parameters for a search index that influence semantic capabilities. - :vartype semantic: ~azure.search.documents.indexes.models.SemanticSearch + :ivar semantic_search: Defines parameters for a search index that influence semantic + capabilities. 
+ :vartype semantic_search: ~azure.search.documents.indexes.models.SemanticSearch :ivar vector_search: Contains configuration options related to vector search. :vartype vector_search: ~azure.search.documents.indexes.models.VectorSearch :ivar e_tag: The ETag of the index. @@ -8838,8 +8839,8 @@ class SearchIndexResponse(_Model): """The type of similarity algorithm to be used when scoring and ranking the documents matching a search query. The similarity algorithm can only be defined at index creation time and cannot be modified on existing indexes. If null, the ClassicSimilarity algorithm is used.""" - semantic: Optional["_models.SemanticSearch"] = rest_field( - visibility=["read", "create", "update", "delete", "query"] + semantic_search: Optional["_models.SemanticSearch"] = rest_field( + name="semantic", visibility=["read", "create", "update", "delete", "query"] ) """Defines parameters for a search index that influence semantic capabilities.""" vector_search: Optional["_models.VectorSearch"] = rest_field( @@ -8867,7 +8868,7 @@ def __init__( normalizers: Optional[list["_models.LexicalNormalizer"]] = None, encryption_key: Optional["_models.SearchResourceEncryptionKey"] = None, similarity: Optional["_models.SimilarityAlgorithm"] = None, - semantic: Optional["_models.SemanticSearch"] = None, + semantic_search: Optional["_models.SemanticSearch"] = None, vector_search: Optional["_models.VectorSearch"] = None, e_tag: Optional[str] = None, ) -> None: ... 
diff --git a/sdk/search/azure-search-documents/azure/search/documents/indexes/models/_patch.py b/sdk/search/azure-search-documents/azure/search/documents/indexes/models/_patch.py index 128bdd3bb4df..2fb18c507aa8 100644 --- a/sdk/search/azure-search-documents/azure/search/documents/indexes/models/_patch.py +++ b/sdk/search/azure-search-documents/azure/search/documents/indexes/models/_patch.py @@ -14,10 +14,7 @@ from ._models import KnowledgeBase as _KnowledgeBase from ._enums import ( LexicalAnalyzerName, - OcrSkillLanguage, SearchFieldDataType as _SearchFieldDataType, - SplitSkillLanguage, - TextTranslationSkillLanguage, ) if TYPE_CHECKING: @@ -160,11 +157,6 @@ def _collection_helper(typ: Any) -> str: SearchFieldDataType.GeographyPoint = SearchFieldDataType.GEOGRAPHY_POINT # type: ignore[attr-defined] SearchFieldDataType.ComplexType = SearchFieldDataType.COMPLEX # type: ignore[attr-defined] -# Backward-compatible alias: IS was renamed to IS_ENUM to avoid conflict with Python keyword -OcrSkillLanguage.IS = OcrSkillLanguage.IS_ENUM # type: ignore[attr-defined] -SplitSkillLanguage.IS = SplitSkillLanguage.IS_ENUM # type: ignore[attr-defined] -TextTranslationSkillLanguage.IS = TextTranslationSkillLanguage.IS_ENUM # type: ignore[attr-defined] - def Collection(typ: Any) -> str: """Helper function to create a collection type string. @@ -183,7 +175,7 @@ def Collection(typ: Any) -> str: def SimpleField( *, name: str, - type: Union[str, _SearchFieldDataType], + type: Union[str, SearchFieldDataType], key: bool = False, hidden: bool = False, filterable: bool = False, @@ -430,65 +422,14 @@ def ComplexField( return SearchField(**result) -class _RemovedModel: - """Base class for models that have been removed from the SDK. - - Allows import to succeed but raises an error on instantiation. - """ - - _removed_name: str = "" - _replacement_name: str = "" - - def __init__(self, *args: Any, **kwargs: Any) -> None: - raise ValueError(f"{self._removed_name} has been removed. 
Use {self._replacement_name} instead.") - - def __init_subclass__(cls, **kwargs: Any) -> None: - super().__init_subclass__(**kwargs) - # Allow direct tombstone class definitions (direct subclasses of _RemovedModel), - # but prevent further subclassing of tombstone classes. - if _RemovedModel not in cls.__bases__: - parent = cls.__bases__[0] - raise TypeError( - f"{getattr(parent, '_removed_name', parent.__name__)} has been removed and cannot be subclassed. " - f"Use {getattr(parent, '_replacement_name', '')} instead." - ) - - -class EntityRecognitionSkill(_RemovedModel): - """EntityRecognitionSkill has been removed. Use EntityRecognitionSkillV3 instead.""" - - _removed_name = "EntityRecognitionSkill" - _replacement_name = "EntityRecognitionSkillV3" - - -class EntityRecognitionSkillLanguage(_RemovedModel): - """EntityRecognitionSkillLanguage has been removed. Use EntityRecognitionSkillV3 instead.""" - - _removed_name = "EntityRecognitionSkillLanguage" - _replacement_name = "EntityRecognitionSkillV3" - - -class SentimentSkill(_RemovedModel): - """SentimentSkill has been removed. 
Use SentimentSkillV3 instead.""" - - _removed_name = "SentimentSkill" - _replacement_name = "SentimentSkillV3" - - __all__: list[str] = [ - "EntityRecognitionSkill", - "EntityRecognitionSkillLanguage", "KnowledgeBase", - "OcrSkillLanguage", "SearchField", "SearchFieldDataType", "SearchIndexerDataSourceConnection", - "SentimentSkill", "SimpleField", "SearchableField", "ComplexField", - "SplitSkillLanguage", - "TextTranslationSkillLanguage", ] # Add all objects you want publicly available to users at this package level diff --git a/sdk/search/azure-search-documents/azure/search/documents/knowledgebases/_client.py b/sdk/search/azure-search-documents/azure/search/documents/knowledgebases/_client.py index 09be3b6eb7fa..678c50c3af24 100644 --- a/sdk/search/azure-search-documents/azure/search/documents/knowledgebases/_client.py +++ b/sdk/search/azure-search-documents/azure/search/documents/knowledgebases/_client.py @@ -32,15 +32,25 @@ class KnowledgeBaseRetrievalClient(_KnowledgeBaseRetrievalClientOperationsMixin) credential type or a token credential type. Required. :type credential: ~azure.core.credentials.AzureKeyCredential or ~azure.core.credentials.TokenCredential + :param knowledge_base_name: The name of the knowledge base. Required. + :type knowledge_base_name: str :keyword api_version: The API version to use for this operation. Known values are "2026-04-01" and None. Default value is "2026-04-01". Note that overriding this default value may result in unsupported behavior. 
:paramtype api_version: str """ - def __init__(self, endpoint: str, credential: Union[AzureKeyCredential, "TokenCredential"], **kwargs: Any) -> None: + def __init__( + self, + endpoint: str, + credential: Union[AzureKeyCredential, "TokenCredential"], + knowledge_base_name: str, + **kwargs: Any + ) -> None: _endpoint = "{endpoint}" - self._config = KnowledgeBaseRetrievalClientConfiguration(endpoint=endpoint, credential=credential, **kwargs) + self._config = KnowledgeBaseRetrievalClientConfiguration( + endpoint=endpoint, credential=credential, knowledge_base_name=knowledge_base_name, **kwargs + ) _policies = kwargs.pop("policies", None) if _policies is None: diff --git a/sdk/search/azure-search-documents/azure/search/documents/knowledgebases/_configuration.py b/sdk/search/azure-search-documents/azure/search/documents/knowledgebases/_configuration.py index 6820fa6faf5b..39290fccba96 100644 --- a/sdk/search/azure-search-documents/azure/search/documents/knowledgebases/_configuration.py +++ b/sdk/search/azure-search-documents/azure/search/documents/knowledgebases/_configuration.py @@ -29,22 +29,33 @@ class KnowledgeBaseRetrievalClientConfiguration: # pylint: disable=too-many-ins credential type or a token credential type. Required. :type credential: ~azure.core.credentials.AzureKeyCredential or ~azure.core.credentials.TokenCredential + :param knowledge_base_name: The name of the knowledge base. Required. + :type knowledge_base_name: str :keyword api_version: The API version to use for this operation. Known values are "2026-04-01" and None. Default value is "2026-04-01". Note that overriding this default value may result in unsupported behavior. 
:paramtype api_version: str """ - def __init__(self, endpoint: str, credential: Union[AzureKeyCredential, "TokenCredential"], **kwargs: Any) -> None: + def __init__( + self, + endpoint: str, + credential: Union[AzureKeyCredential, "TokenCredential"], + knowledge_base_name: str, + **kwargs: Any, + ) -> None: api_version: str = kwargs.pop("api_version", "2026-04-01") if endpoint is None: raise ValueError("Parameter 'endpoint' must not be None.") if credential is None: raise ValueError("Parameter 'credential' must not be None.") + if knowledge_base_name is None: + raise ValueError("Parameter 'knowledge_base_name' must not be None.") self.endpoint = endpoint self.credential = credential + self.knowledge_base_name = knowledge_base_name self.api_version = api_version self.credential_scopes = kwargs.pop("credential_scopes", ["https://search.azure.com/.default"]) kwargs.setdefault("sdk_moniker", "search-documents/{}".format(VERSION)) diff --git a/sdk/search/azure-search-documents/azure/search/documents/knowledgebases/_operations/_operations.py b/sdk/search/azure-search-documents/azure/search/documents/knowledgebases/_operations/_operations.py index 3bfd42374859..539027df674a 100644 --- a/sdk/search/azure-search-documents/azure/search/documents/knowledgebases/_operations/_operations.py +++ b/sdk/search/azure-search-documents/azure/search/documents/knowledgebases/_operations/_operations.py @@ -78,7 +78,6 @@ class _KnowledgeBaseRetrievalClientOperationsMixin( @overload def retrieve( self, - knowledge_base_name: str, retrieval_request: _models1.KnowledgeBaseRetrievalRequest, *, content_type: str = "application/json", @@ -86,8 +85,6 @@ def retrieve( ) -> _models1.KnowledgeBaseRetrievalResponse: """KnowledgeBase retrieves relevant data from backing stores. - :param knowledge_base_name: The name of the knowledge base. Required. - :type knowledge_base_name: str :param retrieval_request: The retrieval request to process. Required. 
:type retrieval_request: ~azure.search.documents.knowledgebases.models.KnowledgeBaseRetrievalRequest @@ -102,17 +99,10 @@ def retrieve( @overload def retrieve( - self, - knowledge_base_name: str, - retrieval_request: JSON, - *, - content_type: str = "application/json", - **kwargs: Any + self, retrieval_request: JSON, *, content_type: str = "application/json", **kwargs: Any ) -> _models1.KnowledgeBaseRetrievalResponse: """KnowledgeBase retrieves relevant data from backing stores. - :param knowledge_base_name: The name of the knowledge base. Required. - :type knowledge_base_name: str :param retrieval_request: The retrieval request to process. Required. :type retrieval_request: JSON :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. @@ -126,17 +116,10 @@ def retrieve( @overload def retrieve( - self, - knowledge_base_name: str, - retrieval_request: IO[bytes], - *, - content_type: str = "application/json", - **kwargs: Any + self, retrieval_request: IO[bytes], *, content_type: str = "application/json", **kwargs: Any ) -> _models1.KnowledgeBaseRetrievalResponse: """KnowledgeBase retrieves relevant data from backing stores. - :param knowledge_base_name: The name of the knowledge base. Required. - :type knowledge_base_name: str :param retrieval_request: The retrieval request to process. Required. :type retrieval_request: IO[bytes] :keyword content_type: Body Parameter content-type. Content type parameter for binary body. @@ -150,15 +133,10 @@ def retrieve( @distributed_trace def retrieve( - self, - knowledge_base_name: str, - retrieval_request: Union[_models1.KnowledgeBaseRetrievalRequest, JSON, IO[bytes]], - **kwargs: Any + self, retrieval_request: Union[_models1.KnowledgeBaseRetrievalRequest, JSON, IO[bytes]], **kwargs: Any ) -> _models1.KnowledgeBaseRetrievalResponse: """KnowledgeBase retrieves relevant data from backing stores. - :param knowledge_base_name: The name of the knowledge base. Required. 
- :type knowledge_base_name: str :param retrieval_request: The retrieval request to process. Is one of the following types: KnowledgeBaseRetrievalRequest, JSON, IO[bytes] Required. :type retrieval_request: @@ -191,7 +169,7 @@ def retrieve( _content = json.dumps(retrieval_request, cls=SdkJSONEncoder, exclude_readonly=True) # type: ignore _request = build_knowledge_base_retrieval_retrieve_request( - knowledge_base_name=knowledge_base_name, + knowledge_base_name=self._config.knowledge_base_name, content_type=content_type, api_version=self._config.api_version, content=_content, diff --git a/sdk/search/azure-search-documents/azure/search/documents/knowledgebases/_patch.py b/sdk/search/azure-search-documents/azure/search/documents/knowledgebases/_patch.py index 87676c65a8f0..0464a4f98c4a 100644 --- a/sdk/search/azure-search-documents/azure/search/documents/knowledgebases/_patch.py +++ b/sdk/search/azure-search-documents/azure/search/documents/knowledgebases/_patch.py @@ -7,9 +7,43 @@ Follow our quickstart for examples: https://aka.ms/azsdk/python/dpcodegen/python/customize """ +from typing import Any, Union +from azure.core.credentials import AzureKeyCredential, TokenCredential -__all__: list[str] = [] # Add all objects you want publicly available to users at this package level +from ._client import KnowledgeBaseRetrievalClient as _KnowledgeBaseRetrievalClient + + +class KnowledgeBaseRetrievalClient(_KnowledgeBaseRetrievalClient): + """KnowledgeBaseRetrievalClient. + + :param endpoint: The endpoint URL of the search service. Required. + :type endpoint: str + :param credential: Credential used to authenticate requests to the service. Is either a key + credential type or a token credential type. Required. + :type credential: ~azure.core.credentials.AzureKeyCredential or + ~azure.core.credentials.TokenCredential + :param knowledge_base_name: The name of the knowledge base. Required. 
+ :type knowledge_base_name: str + :keyword api_version: The API version to use for this operation. Known values are "2026-04-01" + and None. Default value is "2026-04-01". Note that overriding this default value may result in + unsupported behavior. + :paramtype api_version: str + :keyword str audience: Sets the Audience to use for authentication with Microsoft Entra ID. The + audience is not considered when using a shared key. If audience is not provided, the public cloud + audience will be assumed. + """ + + def __init__(self, endpoint: str, credential: Union[AzureKeyCredential, TokenCredential], **kwargs: Any) -> None: + audience = kwargs.pop("audience", None) + if audience: + kwargs.setdefault("credential_scopes", [audience.rstrip("/") + "/.default"]) + super().__init__(endpoint=endpoint, credential=credential, **kwargs) + + +__all__: list[str] = [ + "KnowledgeBaseRetrievalClient", +] def patch_sdk(): diff --git a/sdk/search/azure-search-documents/azure/search/documents/knowledgebases/aio/_client.py b/sdk/search/azure-search-documents/azure/search/documents/knowledgebases/aio/_client.py index 085ecb8d75a3..641861f05a62 100644 --- a/sdk/search/azure-search-documents/azure/search/documents/knowledgebases/aio/_client.py +++ b/sdk/search/azure-search-documents/azure/search/documents/knowledgebases/aio/_client.py @@ -32,6 +32,8 @@ class KnowledgeBaseRetrievalClient(_KnowledgeBaseRetrievalClientOperationsMixin) credential type or a token credential type. Required. :type credential: ~azure.core.credentials.AzureKeyCredential or ~azure.core.credentials_async.AsyncTokenCredential + :param knowledge_base_name: The name of the knowledge base. Required. + :type knowledge_base_name: str :keyword api_version: The API version to use for this operation. Known values are "2026-04-01" and None. Default value is "2026-04-01". Note that overriding this default value may result in unsupported behavior. 
@@ -39,10 +41,16 @@ class KnowledgeBaseRetrievalClient(_KnowledgeBaseRetrievalClientOperationsMixin) """ def __init__( - self, endpoint: str, credential: Union[AzureKeyCredential, "AsyncTokenCredential"], **kwargs: Any + self, + endpoint: str, + credential: Union[AzureKeyCredential, "AsyncTokenCredential"], + knowledge_base_name: str, + **kwargs: Any ) -> None: _endpoint = "{endpoint}" - self._config = KnowledgeBaseRetrievalClientConfiguration(endpoint=endpoint, credential=credential, **kwargs) + self._config = KnowledgeBaseRetrievalClientConfiguration( + endpoint=endpoint, credential=credential, knowledge_base_name=knowledge_base_name, **kwargs + ) _policies = kwargs.pop("policies", None) if _policies is None: diff --git a/sdk/search/azure-search-documents/azure/search/documents/knowledgebases/aio/_configuration.py b/sdk/search/azure-search-documents/azure/search/documents/knowledgebases/aio/_configuration.py index 5b03df359c71..831286c43092 100644 --- a/sdk/search/azure-search-documents/azure/search/documents/knowledgebases/aio/_configuration.py +++ b/sdk/search/azure-search-documents/azure/search/documents/knowledgebases/aio/_configuration.py @@ -29,6 +29,8 @@ class KnowledgeBaseRetrievalClientConfiguration: # pylint: disable=too-many-ins credential type or a token credential type. Required. :type credential: ~azure.core.credentials.AzureKeyCredential or ~azure.core.credentials_async.AsyncTokenCredential + :param knowledge_base_name: The name of the knowledge base. Required. + :type knowledge_base_name: str :keyword api_version: The API version to use for this operation. Known values are "2026-04-01" and None. Default value is "2026-04-01". Note that overriding this default value may result in unsupported behavior. 
@@ -36,7 +38,11 @@ class KnowledgeBaseRetrievalClientConfiguration: # pylint: disable=too-many-ins """ def __init__( - self, endpoint: str, credential: Union[AzureKeyCredential, "AsyncTokenCredential"], **kwargs: Any + self, + endpoint: str, + credential: Union[AzureKeyCredential, "AsyncTokenCredential"], + knowledge_base_name: str, + **kwargs: Any, ) -> None: api_version: str = kwargs.pop("api_version", "2026-04-01") @@ -44,9 +50,12 @@ def __init__( raise ValueError("Parameter 'endpoint' must not be None.") if credential is None: raise ValueError("Parameter 'credential' must not be None.") + if knowledge_base_name is None: + raise ValueError("Parameter 'knowledge_base_name' must not be None.") self.endpoint = endpoint self.credential = credential + self.knowledge_base_name = knowledge_base_name self.api_version = api_version self.credential_scopes = kwargs.pop("credential_scopes", ["https://search.azure.com/.default"]) kwargs.setdefault("sdk_moniker", "search-documents/{}".format(VERSION)) diff --git a/sdk/search/azure-search-documents/azure/search/documents/knowledgebases/aio/_operations/_operations.py b/sdk/search/azure-search-documents/azure/search/documents/knowledgebases/aio/_operations/_operations.py index 03cd83e2cf8e..db31fdc21cc7 100644 --- a/sdk/search/azure-search-documents/azure/search/documents/knowledgebases/aio/_operations/_operations.py +++ b/sdk/search/azure-search-documents/azure/search/documents/knowledgebases/aio/_operations/_operations.py @@ -46,7 +46,6 @@ class _KnowledgeBaseRetrievalClientOperationsMixin( @overload async def retrieve( self, - knowledge_base_name: str, retrieval_request: _models2.KnowledgeBaseRetrievalRequest, *, content_type: str = "application/json", @@ -54,8 +53,6 @@ async def retrieve( ) -> _models2.KnowledgeBaseRetrievalResponse: """KnowledgeBase retrieves relevant data from backing stores. - :param knowledge_base_name: The name of the knowledge base. Required. 
- :type knowledge_base_name: str :param retrieval_request: The retrieval request to process. Required. :type retrieval_request: ~azure.search.documents.knowledgebases.models.KnowledgeBaseRetrievalRequest @@ -70,17 +67,10 @@ async def retrieve( @overload async def retrieve( - self, - knowledge_base_name: str, - retrieval_request: JSON, - *, - content_type: str = "application/json", - **kwargs: Any + self, retrieval_request: JSON, *, content_type: str = "application/json", **kwargs: Any ) -> _models2.KnowledgeBaseRetrievalResponse: """KnowledgeBase retrieves relevant data from backing stores. - :param knowledge_base_name: The name of the knowledge base. Required. - :type knowledge_base_name: str :param retrieval_request: The retrieval request to process. Required. :type retrieval_request: JSON :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. @@ -94,17 +84,10 @@ async def retrieve( @overload async def retrieve( - self, - knowledge_base_name: str, - retrieval_request: IO[bytes], - *, - content_type: str = "application/json", - **kwargs: Any + self, retrieval_request: IO[bytes], *, content_type: str = "application/json", **kwargs: Any ) -> _models2.KnowledgeBaseRetrievalResponse: """KnowledgeBase retrieves relevant data from backing stores. - :param knowledge_base_name: The name of the knowledge base. Required. - :type knowledge_base_name: str :param retrieval_request: The retrieval request to process. Required. :type retrieval_request: IO[bytes] :keyword content_type: Body Parameter content-type. Content type parameter for binary body. 
@@ -118,15 +101,10 @@ async def retrieve( @distributed_trace_async async def retrieve( - self, - knowledge_base_name: str, - retrieval_request: Union[_models2.KnowledgeBaseRetrievalRequest, JSON, IO[bytes]], - **kwargs: Any + self, retrieval_request: Union[_models2.KnowledgeBaseRetrievalRequest, JSON, IO[bytes]], **kwargs: Any ) -> _models2.KnowledgeBaseRetrievalResponse: """KnowledgeBase retrieves relevant data from backing stores. - :param knowledge_base_name: The name of the knowledge base. Required. - :type knowledge_base_name: str :param retrieval_request: The retrieval request to process. Is one of the following types: KnowledgeBaseRetrievalRequest, JSON, IO[bytes] Required. :type retrieval_request: @@ -159,7 +137,7 @@ async def retrieve( _content = json.dumps(retrieval_request, cls=SdkJSONEncoder, exclude_readonly=True) # type: ignore _request = build_knowledge_base_retrieval_retrieve_request( - knowledge_base_name=knowledge_base_name, + knowledge_base_name=self._config.knowledge_base_name, content_type=content_type, api_version=self._config.api_version, content=_content, diff --git a/sdk/search/azure-search-documents/azure/search/documents/knowledgebases/aio/_patch.py b/sdk/search/azure-search-documents/azure/search/documents/knowledgebases/aio/_patch.py index 87676c65a8f0..5a69656c99ec 100644 --- a/sdk/search/azure-search-documents/azure/search/documents/knowledgebases/aio/_patch.py +++ b/sdk/search/azure-search-documents/azure/search/documents/knowledgebases/aio/_patch.py @@ -7,9 +7,46 @@ Follow our quickstart for examples: https://aka.ms/azsdk/python/dpcodegen/python/customize """ +from typing import Any, Union +from azure.core.credentials import AzureKeyCredential +from azure.core.credentials_async import AsyncTokenCredential -__all__: list[str] = [] # Add all objects you want publicly available to users at this package level +from ._client import KnowledgeBaseRetrievalClient as _KnowledgeBaseRetrievalClient + + +class 
KnowledgeBaseRetrievalClient(_KnowledgeBaseRetrievalClient): + """KnowledgeBaseRetrievalClient. + + :param endpoint: The endpoint URL of the search service. Required. + :type endpoint: str + :param credential: Credential used to authenticate requests to the service. Is either a key + credential type or a token credential type. Required. + :type credential: ~azure.core.credentials.AzureKeyCredential or + ~azure.core.credentials_async.AsyncTokenCredential + :keyword knowledge_base_name: The name of the knowledge base. Required. + :paramtype knowledge_base_name: str + :keyword api_version: The API version to use for this operation. Known values are "2026-04-01" + and None. Default value is "2026-04-01". Note that overriding this default value may result in + unsupported behavior. + :paramtype api_version: str + :keyword str audience: Sets the Audience to use for authentication with Microsoft Entra ID. The + audience is not considered when using a shared key. If audience is not provided, the public cloud + audience will be assumed. 
+ """ + + def __init__( + self, endpoint: str, credential: Union[AzureKeyCredential, AsyncTokenCredential], **kwargs: Any + ) -> None: + audience = kwargs.pop("audience", None) + if audience: + kwargs.setdefault("credential_scopes", [audience.rstrip("/") + "/.default"]) + super().__init__(endpoint=endpoint, credential=credential, **kwargs) + + +__all__: list[str] = [ + "KnowledgeBaseRetrievalClient", +] def patch_sdk(): diff --git a/sdk/search/azure-search-documents/azure/search/documents/models/__init__.py b/sdk/search/azure-search-documents/azure/search/documents/models/__init__.py index f114daf84f72..3445685e3c69 100644 --- a/sdk/search/azure-search-documents/azure/search/documents/models/__init__.py +++ b/sdk/search/azure-search-documents/azure/search/documents/models/__init__.py @@ -15,7 +15,6 @@ from ._models import ( # type: ignore AutocompleteItem, - DebugInfo, DocumentDebugInfo, ErrorAdditionalInfo, ErrorDetail, @@ -28,8 +27,6 @@ QueryAnswerResult, QueryCaptionResult, QueryResultDocumentSubscores, - SearchDocumentsResult, - SearchRequest, SearchResult, SingleVectorFieldResult, SuggestResult, @@ -43,6 +40,7 @@ ) from ._enums import ( # type: ignore + AutocompleteMode, IndexActionType, QueryAnswerType, QueryCaptionType, @@ -62,7 +60,6 @@ __all__ = [ "AutocompleteItem", - "DebugInfo", "DocumentDebugInfo", "ErrorAdditionalInfo", "ErrorDetail", @@ -75,8 +72,6 @@ "QueryAnswerResult", "QueryCaptionResult", "QueryResultDocumentSubscores", - "SearchDocumentsResult", - "SearchRequest", "SearchResult", "SingleVectorFieldResult", "SuggestResult", @@ -87,6 +82,7 @@ "VectorizableTextQuery", "VectorizedQuery", "VectorsDebugInfo", + "AutocompleteMode", "IndexActionType", "QueryAnswerType", "QueryCaptionType", diff --git a/sdk/search/azure-search-documents/azure/search/documents/models/_enums.py b/sdk/search/azure-search-documents/azure/search/documents/models/_enums.py index 617894eb5ecb..b28ea43a2725 100644 --- 
a/sdk/search/azure-search-documents/azure/search/documents/models/_enums.py +++ b/sdk/search/azure-search-documents/azure/search/documents/models/_enums.py @@ -133,7 +133,7 @@ class ScoringStatistics(str, Enum, metaclass=CaseInsensitiveEnumMeta): LOCAL = "local" """The scoring statistics will be calculated locally for lower latency.""" - GLOBAL_ENUM = "global" + GLOBAL = "global" """The scoring statistics will be calculated globally for more consistent scoring.""" diff --git a/sdk/search/azure-search-documents/azure/search/documents/models/_models.py b/sdk/search/azure-search-documents/azure/search/documents/models/_models.py index 7ff6a0acf51d..e41c0387b65c 100644 --- a/sdk/search/azure-search-documents/azure/search/documents/models/_models.py +++ b/sdk/search/azure-search-documents/azure/search/documents/models/_models.py @@ -349,7 +349,7 @@ class SearchDocumentsResult(_Model): :vartype answers: list[~azure.search.documents.models.QueryAnswerResult] :ivar next_page_parameters: Continuation JSON payload returned when the query can't return all the requested results in a single response. You can use this JSON along with. - :vartype next_page_parameters: ~azure.search.documents.models.SearchRequest + :vartype next_page_parameters: ~azure.search.documents.models._models.SearchRequest :ivar results: The sequence of results returned by the query. Required. 
:vartype results: list[~azure.search.documents.models.SearchResult] :ivar next_link: Continuation URL returned when the query can't return all the requested @@ -382,7 +382,7 @@ class SearchDocumentsResult(_Model): answers: Optional[list["_models.QueryAnswerResult"]] = rest_field(name="@search.answers", visibility=["read"]) """The answers query results for the search operation; null if the answers query parameter was not specified or set to 'none'.""" - next_page_parameters: Optional["_models.SearchRequest"] = rest_field( + next_page_parameters: Optional["_models._models.SearchRequest"] = rest_field( name="@search.nextPageParameters", visibility=["read"] ) """Continuation JSON payload returned when the query can't return all the requested results in a diff --git a/sdk/search/azure-search-documents/azure/search/documents/models/_patch.py b/sdk/search/azure-search-documents/azure/search/documents/models/_patch.py index 5c41cf547f37..264ae9ac9a63 100644 --- a/sdk/search/azure-search-documents/azure/search/documents/models/_patch.py +++ b/sdk/search/azure-search-documents/azure/search/documents/models/_patch.py @@ -13,10 +13,7 @@ from ._models import IndexDocumentsBatch as IndexDocumentsBatchGenerated from ._models import IndexAction -from ._enums import AutocompleteMode, IndexActionType, ScoringStatistics - -# Backward-compatible alias: IS was renamed to IS_ENUM to avoid conflict with Python keyword -ScoringStatistics.Global = ScoringStatistics.GLOBAL_ENUM # type: ignore[attr-defined] +from ._enums import IndexActionType def _flatten_args(args: Tuple[Union[List[Dict[Any, Any]], List[List[Dict[Any, Any]]]], ...]) -> List[Dict]: @@ -206,9 +203,7 @@ def _extend_batch(self, documents: List[Dict], action_type: str) -> List[IndexAc IndexDocumentsBatch.__module__ = "azure.search.documents" __all__: list[str] = [ - "AutocompleteMode", "IndexDocumentsBatch", - "ScoringStatistics", ] # Add all objects you want publicly available to users at this package level diff --git 
a/sdk/search/azure-search-documents/tests/test_search_client.py b/sdk/search/azure-search-documents/tests/test_search_client.py index eebac852258d..3dab93c9802c 100644 --- a/sdk/search/azure-search-documents/tests/test_search_client.py +++ b/sdk/search/azure-search-documents/tests/test_search_client.py @@ -12,10 +12,12 @@ from azure.search.documents.models import ( FacetResult, - SearchDocumentsResult, SearchResult, ) +# Internal type used to mock the wire response from _search_post +from azure.search.documents.models._models import SearchDocumentsResult + from azure.search.documents import ( IndexDocumentsBatch, SearchClient, diff --git a/sdk/search/azure-search-documents/tests/test_search_client_async.py b/sdk/search/azure-search-documents/tests/test_search_client_async.py index 826802f59386..635119655111 100644 --- a/sdk/search/azure-search-documents/tests/test_search_client_async.py +++ b/sdk/search/azure-search-documents/tests/test_search_client_async.py @@ -7,9 +7,11 @@ from azure.search.documents.aio._operations._patch import AsyncSearchPageIterator from azure.search.documents.models import ( FacetResult, - SearchDocumentsResult, SearchResult, ) + +# Internal type used to mock the wire response from _search_post +from azure.search.documents.models._models import SearchDocumentsResult from azure.search.documents.aio import SearchClient from test_search_index_client_async import await_prepared_test diff --git a/sdk/search/azure-search-documents/tsp-location.yaml b/sdk/search/azure-search-documents/tsp-location.yaml index 77ce3ceb4d3e..a5ea144911ab 100644 --- a/sdk/search/azure-search-documents/tsp-location.yaml +++ b/sdk/search/azure-search-documents/tsp-location.yaml @@ -1,4 +1,4 @@ directory: specification/search/data-plane/Search -commit: 3a1a604f7bb618bb965b4ec7bee181138e4b7767 +commit: 70c6d1a7c8f9607e09a1d1c9f17f079bbb73f054 repo: Azure/azure-rest-api-specs \ No newline at end of file