Skip to content
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
8 changes: 7 additions & 1 deletion docs/release_notes.md
Original file line number Diff line number Diff line change
Expand Up @@ -15,7 +15,13 @@
- Extend `OpenAiClientException` and `OrchestrationClientException` to retrieve error diagnostics information received from remote service.
New available accessors for troubleshooting: `getErrorResponse()`, `getHttpResponse()` and, `getHttpRequest()`.
Please note: depending on the error response, these methods may return `null` if the information is not available.

- [OpenAI] Added new models for `OpenAiModel`: `GPT_5`, `GPT_5_MINI` and `GPT_5_NANO`.
- [Orchestration] Added new models for `OrchestrationAiModel`: `GPT_5`, `GPT_5_MINI` and
`GPT_5_NANO`.
- [Orchestration] Deprecated models for `OrchestrationAiModel`: `GEMINI_1_5_PRO` and
  `GEMINI_1_5_FLASH`.
  - Replacements are `GEMINI_2_5_PRO` and `GEMINI_2_5_FLASH`.
- [Orchestration] Deprecated `OrchestrationAiModel.IBM_GRANITE_13B_CHAT` with no replacement.

### 📈 Improvements

Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -95,6 +95,15 @@ public record OpenAiModel(@Nonnull String name, @Nullable String version) implem
/** Azure OpenAI GPT-4.1-mini model */
public static final OpenAiModel GPT_41_MINI = new OpenAiModel("gpt-4.1-mini", null);

/** Azure OpenAI GPT-5 model */
public static final OpenAiModel GPT_5 = new OpenAiModel("gpt-5", null);

/** Azure OpenAI GPT-5-mini model */
public static final OpenAiModel GPT_5_MINI = new OpenAiModel("gpt-5-mini", null);

/** Azure OpenAI GPT-5-nano model */
public static final OpenAiModel GPT_5_NANO = new OpenAiModel("gpt-5-nano", null);

/**
* Azure OpenAI Text Embedding ADA 002 model
*
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -49,13 +49,13 @@ class OpenAiClientGeneratedTest extends BaseOpenAiClientTest {

@Test
void openAiModels() {
var model = OpenAiModel.GPT_4;
var model = OpenAiModel.GPT_5;
var newModel = model.withVersion("v1");

assertThat(model.name()).isEqualTo("gpt-4");
assertThat(model.name()).isEqualTo("gpt-5");
assertThat(model.version()).isNull();

assertThat(newModel.name()).isEqualTo("gpt-4");
assertThat(newModel.name()).isEqualTo("gpt-5");
assertThat(newModel.version()).isEqualTo("v1");

assertThat(model).isNotSameAs(newModel);
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -44,7 +44,13 @@ public class OrchestrationAiModel {
/** The version of the model, defaults to "latest". */
String version;

/** IBM Granite 13B Chat model */
/**
* IBM Granite 13B Chat model
*
* @deprecated This model is deprecated on AI Core with a planned retirement not earlier than
* 2025-09-30.
*/
@Deprecated
public static final OrchestrationAiModel IBM_GRANITE_13B_CHAT =
new OrchestrationAiModel("ibm--granite-13b-chat");

Expand Down Expand Up @@ -228,6 +234,15 @@ public class OrchestrationAiModel {
/** Azure OpenAI o3 model */
public static final OrchestrationAiModel OPENAI_O3 = new OrchestrationAiModel("o3");

/** Azure OpenAI GPT-5 model */
public static final OrchestrationAiModel GPT_5 = new OrchestrationAiModel("gpt-5");

/** Azure OpenAI GPT-5-mini model */
public static final OrchestrationAiModel GPT_5_MINI = new OrchestrationAiModel("gpt-5-mini");

/** Azure OpenAI GPT-5-nano model */
public static final OrchestrationAiModel GPT_5_NANO = new OrchestrationAiModel("gpt-5-nano");

/**
* Google Cloud Platform Gemini 1.0 Pro model
*
Expand All @@ -239,11 +254,23 @@ public class OrchestrationAiModel {
public static final OrchestrationAiModel GEMINI_1_0_PRO =
new OrchestrationAiModel("gemini-1.0-pro");

/** Google Cloud Platform Gemini 1.5 Pro model */
/**
* Google Cloud Platform Gemini 1.5 Pro model
*
* @deprecated This model is deprecated on AI Core with a planned retirement on 2025-09-24. The
* suggested replacement model is {@link OrchestrationAiModel#GEMINI_2_5_PRO}.
*/
@Deprecated
public static final OrchestrationAiModel GEMINI_1_5_PRO =
new OrchestrationAiModel("gemini-1.5-pro");

/** Google Cloud Platform Gemini 1.5 Flash model */
/**
* Google Cloud Platform Gemini 1.5 Flash model
*
* @deprecated This model is deprecated on AI Core with a planned retirement on 2025-09-24. The
* suggested replacement model is {@link OrchestrationAiModel#GEMINI_2_5_FLASH}.
*/
@Deprecated
public static final OrchestrationAiModel GEMINI_1_5_FLASH =
new OrchestrationAiModel("gemini-1.5-flash");

Expand Down
Original file line number Diff line number Diff line change
@@ -1,6 +1,6 @@
package com.sap.ai.sdk.orchestration.spring;

import static com.sap.ai.sdk.orchestration.OrchestrationAiModel.GEMINI_1_5_FLASH;
import static com.sap.ai.sdk.orchestration.OrchestrationAiModel.GEMINI_2_5_FLASH;
import static com.sap.ai.sdk.orchestration.OrchestrationAiModel.Parameter.FREQUENCY_PENALTY;
import static com.sap.ai.sdk.orchestration.OrchestrationAiModel.Parameter.MAX_TOKENS;
import static com.sap.ai.sdk.orchestration.OrchestrationAiModel.Parameter.PRESENCE_PENALTY;
Expand All @@ -16,7 +16,7 @@
class OrchestrationChatOptionsTest {

static final OrchestrationAiModel CUSTOM_LLM =
GEMINI_1_5_FLASH
GEMINI_2_5_FLASH
.withParam(FREQUENCY_PENALTY, 0.5)
.withParam(MAX_TOKENS, 100)
.withParam(PRESENCE_PENALTY, 0.5)
Expand All @@ -26,8 +26,8 @@ class OrchestrationChatOptionsTest {
.withParam(TOP_P, 0.5);

private static void assertCustomLLM(OrchestrationChatOptions opts) {
assertThat(opts.getModel()).isEqualTo(GEMINI_1_5_FLASH.getName());
assertThat(opts.getModelVersion()).isEqualTo(GEMINI_1_5_FLASH.getVersion());
assertThat(opts.getModel()).isEqualTo(GEMINI_2_5_FLASH.getName());
assertThat(opts.getModelVersion()).isEqualTo(GEMINI_2_5_FLASH.getVersion());
assertThat(opts.getFrequencyPenalty()).isEqualTo(0.5);
assertThat(opts.getMaxTokens()).isEqualTo(100);
assertThat(opts.getPresencePenalty()).isEqualTo(0.5);
Expand All @@ -41,10 +41,10 @@ private static void assertCustomLLM(OrchestrationChatOptions opts) {
void testParametersAreInherited() {
var opts =
new OrchestrationChatOptions(
new OrchestrationModuleConfig().withLlmConfig(GEMINI_1_5_FLASH));
new OrchestrationModuleConfig().withLlmConfig(GEMINI_2_5_FLASH));

assertThat(opts.getModel()).isEqualTo(GEMINI_1_5_FLASH.getName());
assertThat(opts.getModelVersion()).isEqualTo(GEMINI_1_5_FLASH.getVersion());
assertThat(opts.getModel()).isEqualTo(GEMINI_2_5_FLASH.getName());
assertThat(opts.getModelVersion()).isEqualTo(GEMINI_2_5_FLASH.getVersion());
}

@Test
Expand All @@ -59,11 +59,11 @@ void testCustomParametersAreInherited() {
void testCopy() {
var opts =
new OrchestrationChatOptions(
new OrchestrationModuleConfig().withLlmConfig(GEMINI_1_5_FLASH));
new OrchestrationModuleConfig().withLlmConfig(GEMINI_2_5_FLASH));

var copy = (OrchestrationChatOptions) opts.copy();
assertThat(copy.getModel()).isEqualTo(GEMINI_1_5_FLASH.getName());
assertThat(copy.getModelVersion()).isEqualTo(GEMINI_1_5_FLASH.getVersion());
assertThat(copy.getModel()).isEqualTo(GEMINI_2_5_FLASH.getName());
assertThat(copy.getModelVersion()).isEqualTo(GEMINI_2_5_FLASH.getVersion());
}

@Test
Expand Down
Original file line number Diff line number Diff line change
@@ -1,6 +1,6 @@
package com.sap.ai.sdk.app.services;

import static com.sap.ai.sdk.orchestration.OrchestrationAiModel.GEMINI_1_5_FLASH;
import static com.sap.ai.sdk.orchestration.OrchestrationAiModel.GEMINI_2_5_FLASH;
import static com.sap.ai.sdk.orchestration.OrchestrationAiModel.GPT_4O_MINI;
import static com.sap.ai.sdk.orchestration.OrchestrationAiModel.Parameter.TEMPERATURE;
import static com.sap.ai.sdk.orchestration.model.SAPDocumentTranslation.TypeEnum.SAP_DOCUMENT_TRANSLATION;
Expand Down Expand Up @@ -50,7 +50,7 @@ public class OrchestrationService {

@Getter
private final OrchestrationModuleConfig config =
new OrchestrationModuleConfig().withLlmConfig(GEMINI_1_5_FLASH.withParam(TEMPERATURE, 0.0));
new OrchestrationModuleConfig().withLlmConfig(GEMINI_2_5_FLASH.withParam(TEMPERATURE, 0.0));

/**
* Chat request to OpenAI through the Orchestration service with a simple prompt.
Expand Down
Original file line number Diff line number Diff line change
@@ -1,6 +1,6 @@
package com.sap.ai.sdk.app.services;

import static com.sap.ai.sdk.orchestration.OrchestrationAiModel.GEMINI_1_5_FLASH;
import static com.sap.ai.sdk.orchestration.OrchestrationAiModel.GEMINI_2_5_FLASH;
import static com.sap.ai.sdk.orchestration.OrchestrationAiModel.GPT_4O;
import static com.sap.ai.sdk.orchestration.OrchestrationAiModel.GPT_4O_MINI;

Expand Down Expand Up @@ -132,7 +132,7 @@ public ChatResponse inputFiltering(@Nonnull final AzureFilterThreshold policy)
new AzureContentFilter().hate(policy).selfHarm(policy).sexual(policy).violence(policy);
val opts =
new OrchestrationChatOptions(
config.withLlmConfig(GEMINI_1_5_FLASH).withInputFiltering(filterConfig));
config.withLlmConfig(GEMINI_2_5_FLASH).withInputFiltering(filterConfig));

val prompt =
new Prompt(
Expand All @@ -157,7 +157,7 @@ public ChatResponse outputFiltering(@Nonnull final AzureFilterThreshold policy)
new AzureContentFilter().hate(policy).selfHarm(policy).sexual(policy).violence(policy);
val opts =
new OrchestrationChatOptions(
config.withLlmConfig(GEMINI_1_5_FLASH).withOutputFiltering(filterConfig));
config.withLlmConfig(GEMINI_2_5_FLASH).withOutputFiltering(filterConfig));

val prompt =
new Prompt(
Expand Down
Original file line number Diff line number Diff line change
@@ -1,6 +1,6 @@
package com.sap.ai.sdk.app.controllers;

import static com.sap.ai.sdk.orchestration.OrchestrationAiModel.GEMINI_1_5_FLASH;
import static com.sap.ai.sdk.orchestration.OrchestrationAiModel.GEMINI_2_5_FLASH;
import static com.sap.ai.sdk.orchestration.OrchestrationAiModel.Parameter.TEMPERATURE;
import static com.sap.ai.sdk.orchestration.model.AzureThreshold.*;
import static com.sap.ai.sdk.orchestration.model.ResponseChatMessage.RoleEnum.ASSISTANT;
Expand Down Expand Up @@ -42,7 +42,7 @@
class OrchestrationTest {
private final OrchestrationClient client = new OrchestrationClient();
private final OrchestrationModuleConfig config =
new OrchestrationModuleConfig().withLlmConfig(GEMINI_1_5_FLASH.withParam(TEMPERATURE, 0.0));
new OrchestrationModuleConfig().withLlmConfig(GEMINI_2_5_FLASH.withParam(TEMPERATURE, 0.0));
OrchestrationService service;

@BeforeEach
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -5,18 +5,17 @@
import com.sap.ai.sdk.core.model.AiModelBaseData;
import com.sap.ai.sdk.core.model.AiModelVersion;
import com.sap.ai.sdk.foundationmodels.openai.OpenAiModel;
import com.sap.ai.sdk.orchestration.OrchestrationAiModel;
import java.lang.reflect.Field;
import java.util.HashMap;
import java.util.Optional;
import lombok.SneakyThrows;
import org.assertj.core.api.SoftAssertions;
import org.junit.jupiter.api.Disabled;
import org.junit.jupiter.api.DisplayName;
import org.junit.jupiter.api.Test;

class ScenarioTest {

@Disabled("https://github.com/SAP/ai-sdk-java-backlog/issues/306")
@Test
@DisplayName(
"Declared OpenAI models must be superset of our AI Core account's available OpenAI models")
Expand Down Expand Up @@ -62,6 +61,57 @@ void openAiModelAvailability() {
softly.assertAll();
}

@Test
@DisplayName(
"Declared Orchestration models must be superset of our AI Core account's available Orchestration models")
@SneakyThrows
void orchestrationAiModelAvailability() {

// Gather AI Core's list of available Orchestration models
final var aiModelList = new ScenarioController().getModels().getResources();

final var availableOrchestrationModels =
aiModelList.stream()
.filter(
model ->
model.getAllowedScenarios().stream()
.anyMatch(scenario -> scenario.getScenarioId().equals("orchestration")))
Copy link
Copy Markdown
Contributor Author

@CharlesDuboisSAP CharlesDuboisSAP Aug 18, 2025

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

new scenario param that makes this test possible

.filter(model -> !model.getModel().contains("embed"))
.collect(
() -> new HashMap<String, Boolean>(),
(list, model) -> list.put(model.getModel(), isDeprecated(model)),
HashMap::putAll);

// Gather our declared Orchestration models
Field[] declaredFields = OrchestrationAiModel.class.getFields();

// get the models from the OrchestrationAiModel class
HashMap<String, Boolean> declaredOrchestrationModelList = new HashMap<>();
for (Field field : declaredFields) {
if (field.getType().equals(OrchestrationAiModel.class)) {
declaredOrchestrationModelList.put(
((OrchestrationAiModel) field.get(null)).getName(),
field.isAnnotationPresent(Deprecated.class));
}
}

// Assert that the declared Orchestration models match the expected list
assertThat(declaredOrchestrationModelList.keySet())
.containsAll(availableOrchestrationModels.keySet());

SoftAssertions softly = new SoftAssertions();
for (var model : availableOrchestrationModels.entrySet()) {
Boolean declaredDeprecated = declaredOrchestrationModelList.get(model.getKey());
softly
.assertThat(declaredDeprecated)
.withFailMessage(
"%s is deprecated:%s on AI Core but deprecated:%s in AI SDK",
model.getKey(), model.getValue(), declaredDeprecated)
.isEqualTo(model.getValue());
}
softly.assertAll();
}

private static boolean isDeprecated(AiModelBaseData model) {
Optional<AiModelVersion> version =
model.getVersions().stream().filter(AiModelVersion::isIsLatest).findFirst();
Expand Down
Loading