diff --git a/README.md b/README.md
index 37e35fc..3260469 100644
--- a/README.md
+++ b/README.md
@@ -1,7 +1,8 @@
+ /**
+ * Initializes the OllamaAPI instance for integration tests.
+ *
+ * This method sets up the OllamaAPI client, either using an external Ollama host (if
+ * environment variables are set) or by starting a Testcontainers-based Ollama instance. It also
+ * configures request timeout and model pull retry settings.
+ */
+ @BeforeAll
+ static void setUp() {
+ // ... (no javadoc needed for private setup logic)
+ int requestTimeoutSeconds = 60;
+ int numberOfRetriesForModelPull = 5;
+
+ try {
+ String useExternalOllamaHostEnv = System.getenv("USE_EXTERNAL_OLLAMA_HOST");
+ String ollamaHostEnv = System.getenv("OLLAMA_HOST");
+
+ boolean useExternalOllamaHost;
+ String ollamaHost;
+
+ if (useExternalOllamaHostEnv == null && ollamaHostEnv == null) {
+ Properties props = new Properties();
+ try {
+ props.load(
+ OllamaAPIIntegrationTest.class
+ .getClassLoader()
+ .getResourceAsStream("test-config.properties"));
+ } catch (Exception e) {
+ throw new RuntimeException(
+ "Could not load test-config.properties from classpath", e);
+ }
+ useExternalOllamaHost =
+ Boolean.parseBoolean(
+ props.getProperty("USE_EXTERNAL_OLLAMA_HOST", "false"));
+ ollamaHost = props.getProperty("OLLAMA_HOST");
+ requestTimeoutSeconds =
+ Integer.parseInt(props.getProperty("REQUEST_TIMEOUT_SECONDS"));
+ numberOfRetriesForModelPull =
+ Integer.parseInt(props.getProperty("NUMBER_RETRIES_FOR_MODEL_PULL"));
+ } else {
+ useExternalOllamaHost = Boolean.parseBoolean(useExternalOllamaHostEnv);
+ ollamaHost = ollamaHostEnv;
+ }
+
+ if (useExternalOllamaHost) {
+ LOG.info("Using external Ollama host: {}", ollamaHost);
+ api = new OllamaAPI(ollamaHost);
+ } else {
+ throw new RuntimeException(
+ "USE_EXTERNAL_OLLAMA_HOST is not set, so a Testcontainers-based Ollama"
+ + " host will be used for the tests. To use an external host instead,"
+ + " set USE_EXTERNAL_OLLAMA_HOST=true and"
+ + " OLLAMA_HOST=http://localhost:11435 (or a different host/port).");
+ }
+ } catch (Exception e) {
+ String ollamaVersion = "0.6.1";
+ int internalPort = 11434;
+ int mappedPort = 11435;
+ ollama = new OllamaContainer("ollama/ollama:" + ollamaVersion);
+ ollama.addExposedPort(internalPort);
+ List<String> portBindings = new ArrayList<>();
+ portBindings.add(mappedPort + ":" + internalPort);
+ ollama.setPortBindings(portBindings);
+ ollama.start();
+ api = new OllamaAPI("http://" + ollama.getHost() + ":" + ollama.getMappedPort(internalPort));
+ }
+ api.setRequestTimeoutSeconds(requestTimeoutSeconds);
+ api.setNumberOfRetriesForModelPull(numberOfRetriesForModelPull);
+ }
+
+ /**
+ * Tests that a connect exception is thrown for a wrong endpoint.
+ *
+ * Scenario: Ensures the API client fails gracefully when the Ollama server is unreachable.
+ */
+ @Test
+ @Order(1)
+ void shouldThrowConnectExceptionForWrongEndpoint() {
+ OllamaAPI ollamaAPI = new OllamaAPI("http://wrong-host:11434");
+ assertThrows(OllamaBaseException.class, ollamaAPI::listModels);
+ }
+
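+ // The @BeforeAll setup above supports two configurations. A minimal sketch (property
+ // keys taken from setUp(); the values here are illustrative): either export environment
+ // variables before running the suite,
+ //
+ //     USE_EXTERNAL_OLLAMA_HOST=true OLLAMA_HOST=http://localhost:11435
+ //
+ // or provide a test-config.properties on the test classpath, e.g.:
+ //
+ //     USE_EXTERNAL_OLLAMA_HOST=false
+ //     OLLAMA_HOST=http://localhost:11434
+ //     REQUEST_TIMEOUT_SECONDS=120
+ //     NUMBER_RETRIES_FOR_MODEL_PULL=3
+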
+ /**
+ * Tests retrieval of the Ollama server version.
+ *
+ * Scenario: Calls the /api/version endpoint and asserts a non-null version string is
+ * returned.
+ */
+ @Test
+ @Order(1)
+ void shouldReturnVersionFromVersionAPI() throws OllamaBaseException {
+ String version = api.getVersion();
+ assertNotNull(version);
+ }
+
+ /**
+ * Tests the /api/ping endpoint for server liveness.
+ *
+ * Scenario: Ensures the Ollama server responds to ping requests.
+ */
+ @Test
+ @Order(1)
+ void shouldPingSuccessfully() throws OllamaBaseException {
+ boolean pingResponse = api.ping();
+ assertTrue(pingResponse, "Ping should return true");
+ }
+
+ /**
+ * Tests listing all available models from the Ollama server.
+ *
+ * Scenario: Calls /api/tags and verifies the returned list is not null (may be empty).
+ */
+ @Test
+ @Order(2)
+ void shouldListModels() throws OllamaBaseException {
+ List<Model> models = api.listModels();
+ assertNotNull(models, "Models should not be null");
+ }
+
+ /**
+ * Tests pulling a model and verifying it appears in the model list.
+ *
+ * Scenario: Pulls an embedding model, then checks that it is present in the list of models.
+ */
+ @Test
+ @Order(3)
+ void shouldPullModelAndListModels() throws OllamaBaseException {
+ api.pullModel(EMBEDDING_MODEL);
+ List<Model> models = api.listModels();
+ assertFalse(models.isEmpty(), "Models list should not be empty after pulling a model");
+ }
+
+ /**
+ * Tests retrieving details of a specific model.
+ *
+ * Scenario: Pulls a model and retrieves its details, asserting the model file contains the
+ * model name.
+ */
+ @Test
+ @Order(4)
+ void shouldGetModelDetails() throws OllamaBaseException {
+ api.pullModel(EMBEDDING_MODEL);
+ ModelDetail modelDetails = api.getModelDetails(EMBEDDING_MODEL);
+ assertNotNull(modelDetails);
+ assertTrue(modelDetails.getModelFile().contains(EMBEDDING_MODEL));
+ }
+
+ /**
+ * Tests generating embeddings for a batch of input texts.
+ *
+ * Scenario: Uses the embedding model to generate vector embeddings for two input sentences.
+ */
+ @Test
+ @Order(5)
+ void shouldReturnEmbeddings() throws Exception {
+ api.pullModel(EMBEDDING_MODEL);
+ OllamaEmbedRequestModel m = new OllamaEmbedRequestModel();
+ m.setModel(EMBEDDING_MODEL);
+ m.setInput(Arrays.asList("Why is the sky blue?", "Why is the grass green?"));
+ OllamaEmbedResponseModel embeddings = api.embed(m);
+ assertNotNull(embeddings, "Embeddings should not be null");
+ assertFalse(embeddings.getEmbeddings().isEmpty(), "Embeddings should not be empty");
+ }
+
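+ // Since two inputs are embedded above, the server should return one vector per input;
+ // a stronger assertion along the same lines would be:
+ //
+ //     assertEquals(2, embeddings.getEmbeddings().size());
+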
+ /**
+ * Tests generating structured output using the 'format' parameter.
+ *
+ * Scenario: Calls generateWithFormat with a prompt and a JSON schema, expecting a structured
+ * response. Usage: generate with format, no thinking, no streaming.
+ */
+ @Test
+ @Order(6)
+ void shouldGenerateWithStructuredOutput() throws OllamaBaseException {
+ api.pullModel(TOOLS_MODEL);
+
+ String prompt =
+ "The sun is shining brightly and is directly overhead at the zenith, casting my"
+ + " shadow over my foot, so it must be noon.";
+
+ Map<String, Object> format = new HashMap<>();
+ format.put("type", "object");
+ format.put("properties", Map.of("isNoon", Map.of("type", "boolean")));
+ format.put("required", List.of("isNoon"));
+
+ OllamaResult result = api.generateWithFormat(TOOLS_MODEL, prompt, format);
+
+ assertNotNull(result);
+ assertNotNull(result.getResponse());
+ assertFalse(result.getResponse().isEmpty());
+ }
+
+ /**
+ * Tests basic text generation with default options.
+ *
+ * Scenario: Calls generate with a general-purpose model, no thinking, no streaming, no
+ * format. Usage: generate, raw=false, think=false, no streaming.
+ */
+ @Test
+ @Order(6)
+ void shouldGenerateWithDefaultOptions() throws OllamaBaseException {
+ api.pullModel(GENERAL_PURPOSE_MODEL);
+ boolean raw = false;
+ boolean thinking = false;
+ OllamaGenerateRequest request =
+ OllamaGenerateRequestBuilder.builder()
+ .withModel(GENERAL_PURPOSE_MODEL)
+ .withPrompt(
+ "What is the capital of France? And what's France's connection with"
+ + " Mona Lisa?")
+ .withRaw(raw)
+ .withThink(thinking)
+ .withOptions(new OptionsBuilder().build())
+ .build();
+ OllamaGenerateStreamObserver handler = null;
+ OllamaResult result = api.generate(request, handler);
+ assertNotNull(result);
+ assertNotNull(result.getResponse());
+ assertFalse(result.getResponse().isEmpty());
+ }
+
+ /**
+ * Tests text generation with streaming enabled.
+ *
+ * Scenario: Calls generate with a general-purpose model, streaming the response tokens.
+ * Usage: generate, raw=false, think=false, streaming enabled.
+ */
+ @Test
+ @Order(7)
+ void shouldGenerateWithDefaultOptionsStreamed() throws OllamaBaseException {
+ api.pullModel(GENERAL_PURPOSE_MODEL);
+ boolean raw = false;
+ OllamaGenerateRequest request =
+ OllamaGenerateRequestBuilder.builder()
+ .withModel(GENERAL_PURPOSE_MODEL)
+ .withPrompt(
+ "What is the capital of France? And what's France's connection with"
+ + " Mona Lisa?")
+ .withRaw(raw)
+ .withThink(false)
+ .withOptions(new OptionsBuilder().build())
+ .build();
+ OllamaGenerateStreamObserver handler = null;
+ OllamaResult result =
+ api.generate(
+ request,
+ new OllamaGenerateStreamObserver(
+ null, new ConsoleOutputGenerateTokenHandler()));
+ assertNotNull(result);
+ assertNotNull(result.getResponse());
+ assertFalse(result.getResponse().isEmpty());
+ }
+
+ /**
+ * Tests chat API with custom options (e.g., temperature).
+ *
+ * Scenario: Builds a chat request with system and user messages, sets a custom temperature,
+ * and verifies the response. Usage: chat, no tools, no thinking, no streaming, custom options.
+ */
+ @Test
+ @Order(8)
+ void shouldGenerateWithCustomOptions() throws OllamaBaseException {
+ api.pullModel(GENERAL_PURPOSE_MODEL);
+
+ OllamaChatRequestBuilder builder =
+ OllamaChatRequestBuilder.builder().withModel(GENERAL_PURPOSE_MODEL);
+ OllamaChatRequest requestModel =
+ builder.withMessage(
+ OllamaChatMessageRole.SYSTEM,
+ "You are a helpful assistant who can generate random person's first"
+ + " and last names in the format [First name, Last name].")
+ .build();
+ requestModel =
+ builder.withMessages(requestModel.getMessages())
+ .withMessage(OllamaChatMessageRole.USER, "Give me a cool name")
+ .withOptions(new OptionsBuilder().setTemperature(0.5f).build())
+ .build();
+ OllamaChatResult chatResult = api.chat(requestModel, null);
+
+ assertNotNull(chatResult);
+ assertNotNull(chatResult.getResponseModel());
+ assertFalse(chatResult.getResponseModel().getMessage().getResponse().isEmpty());
+ }
+
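+ // OptionsBuilder maps onto Ollama's runtime options. Temperature is shown above; other
+ // setters on the same builder (names assumed here, e.g. top-k/top-p) chain the same way:
+ //
+ //     new OptionsBuilder().setTemperature(0.5f).setTopK(40).setTopP(0.9f).build()
+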
+ /**
+ * Tests chat API with a system prompt and verifies the assistant's response.
+ *
+ * Scenario: Sends a system prompt instructing the assistant to reply with a specific word,
+ * then checks the response. Usage: chat, no tools, no thinking, no streaming, system prompt.
+ */
+ @Test
+ @Order(9)
+ void shouldChatWithSystemPrompt() throws OllamaBaseException {
+ api.pullModel(GENERAL_PURPOSE_MODEL);
+
+ String expectedResponse = "Bhai";
+
+ OllamaChatRequestBuilder builder =
+ OllamaChatRequestBuilder.builder().withModel(GENERAL_PURPOSE_MODEL);
+ OllamaChatRequest requestModel =
+ builder.withMessage(
+ OllamaChatMessageRole.SYSTEM,
+ String.format(
+ "[INSTRUCTION-START] You are an obedient and helpful bot"
+ + " named %s. You always answer with only one word and"
+ + " that word is your name. [INSTRUCTION-END]",
+ expectedResponse))
+ .withMessage(OllamaChatMessageRole.USER, "Who are you?")
+ .withOptions(new OptionsBuilder().setTemperature(0.0f).build())
+ .build();
+
+ OllamaChatResult chatResult = api.chat(requestModel, null);
+ assertNotNull(chatResult);
+ assertNotNull(chatResult.getResponseModel());
+ assertNotNull(chatResult.getResponseModel().getMessage());
+ assertFalse(chatResult.getResponseModel().getMessage().getResponse().isBlank());
+ assertTrue(
+ chatResult
+ .getResponseModel()
+ .getMessage()
+ .getResponse()
+ .contains(expectedResponse));
+ assertEquals(3, chatResult.getChatHistory().size());
+ }
+
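+ // The expected history size of 3 corresponds to one message per turn in this exchange:
+ // the system prompt, the user question, and the assistant's reply.
+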
+ /**
+ * Tests chat API with multi-turn conversation (chat history).
+ *
+ * Scenario: Sends a sequence of user messages, each time including the chat history, and
+ * verifies the assistant's responses. Usage: chat, no tools, no thinking, no streaming,
+ * multi-turn.
+ */
+ @Test
+ @Order(10)
+ void shouldChatWithHistory() throws Exception {
+ api.pullModel(THINKING_TOOL_MODEL);
+ OllamaChatRequestBuilder builder =
+ OllamaChatRequestBuilder.builder().withModel(THINKING_TOOL_MODEL);
+
+ OllamaChatRequest requestModel =
+ builder.withMessage(
+ OllamaChatMessageRole.USER, "What is 1+1? Answer only in numbers.")
+ .build();
+
+ OllamaChatResult chatResult = api.chat(requestModel, null);
+
+ assertNotNull(chatResult);
+ assertNotNull(chatResult.getChatHistory());
+ assertNotNull(chatResult.getChatHistory().stream());
+
+ requestModel =
+ builder.withMessages(chatResult.getChatHistory())
+ .withMessage(OllamaChatMessageRole.USER, "And what is its squared value?")
+ .build();
+
+ chatResult = api.chat(requestModel, null);
+
+ assertNotNull(chatResult);
+ assertNotNull(chatResult.getChatHistory());
+ assertNotNull(chatResult.getChatHistory().stream());
+
+ requestModel =
+ builder.withMessages(chatResult.getChatHistory())
+ .withMessage(
+ OllamaChatMessageRole.USER,
+ "What is the largest value between 2, 4 and 6?")
+ .build();
+
+ chatResult = api.chat(requestModel, null);
+
+ assertNotNull(chatResult, "Chat result should not be null");
+ assertTrue(
+ chatResult.getChatHistory().size() > 2,
+ "Chat history should contain more than two messages");
+ }
+
+ /**
+ * Tests chat API with explicit tool invocation (client does not handle tools).
+ *
+ * Scenario: Registers a tool, sends a user message that triggers a tool call, and verifies
+ * the tool call and arguments. Usage: chat, explicit tool, useTools=false, no thinking, no
+ * streaming.
+ */
+ @Test
+ @Order(11)
+ void shouldChatWithExplicitTool() throws OllamaBaseException {
+ String theToolModel = TOOLS_MODEL;
+ api.pullModel(theToolModel);
+ OllamaChatRequestBuilder builder =
+ OllamaChatRequestBuilder.builder().withModel(theToolModel);
+
+ api.registerTool(EmployeeFinderToolSpec.getSpecification());
+
+ OllamaChatRequest requestModel =
+ builder.withMessage(
+ OllamaChatMessageRole.USER,
+ "Give me the ID and address of the employee Rahul Kumar.")
+ .build();
+ requestModel.setOptions(new OptionsBuilder().setTemperature(0.9f).build().getOptionsMap());
+ requestModel.setUseTools(true);
+ OllamaChatResult chatResult = api.chat(requestModel, null);
+
+ assertNotNull(chatResult, "chatResult should not be null");
+ assertNotNull(chatResult.getResponseModel(), "Response model should not be null");
+ assertNotNull(
+ chatResult.getResponseModel().getMessage(), "Response message should not be null");
+ assertEquals(
+ OllamaChatMessageRole.ASSISTANT.getRoleName(),
+ chatResult.getResponseModel().getMessage().getRole().getRoleName(),
+ "Role of the response message should be ASSISTANT");
+ List<OllamaChatToolCalls> toolCalls =
+ chatResult.getResponseModel().getMessage().getToolCalls();
+ assertNotNull(toolCalls, "Tool calls should not be null");
+ }
+
+ /**
+ * Tests chat API with explicit tool invocation and useTools enabled.
+ *
+ * Scenario: Registers a tool, enables useTools, sends a user message, and verifies the
+ * assistant's tool call. Usage: chat, explicit tool, useTools=true, no thinking, no streaming.
+ */
+ @Test
+ @Order(13)
+ void shouldChatWithExplicitToolAndUseTools() throws OllamaBaseException {
+ String theToolModel = TOOLS_MODEL;
+ api.pullModel(theToolModel);
+ OllamaChatRequestBuilder builder =
+ OllamaChatRequestBuilder.builder().withModel(theToolModel);
+
+ api.registerTool(EmployeeFinderToolSpec.getSpecification());
+
+ OllamaChatRequest requestModel =
+ builder.withMessage(
+ OllamaChatMessageRole.USER,
+ "Give me the ID and address of the employee Rahul Kumar.")
+ .build();
+ requestModel.setOptions(new OptionsBuilder().setTemperature(0.9f).build().getOptionsMap());
+ requestModel.setUseTools(true);
+ OllamaChatResult chatResult = api.chat(requestModel, null);
+
+ assertNotNull(chatResult, "chatResult should not be null");
+ assertNotNull(chatResult.getResponseModel(), "Response model should not be null");
+ assertNotNull(
+ chatResult.getResponseModel().getMessage(), "Response message should not be null");
+ assertEquals(
+ OllamaChatMessageRole.ASSISTANT.getRoleName(),
+ chatResult.getResponseModel().getMessage().getRole().getRoleName(),
+ "Role of the response message should be ASSISTANT");
+
+ boolean toolCalled = false;
+ List<OllamaChatMessage> messages = chatResult.getChatHistory();
+ for (OllamaChatMessage message : messages) {
+ if (message.getToolCalls() != null && !message.getToolCalls().isEmpty()) {
+ toolCalled = true;
+ }
+ }
+ assertTrue(toolCalled, "The assistant should have invoked the registered tool");
+ }
+
+ /**
+ * Tests chat API with tools and streaming enabled.
+ *
+ * Scenario: Registers a tool, sends a user message, and streams the assistant's response
+ * (with tool call). Usage: chat, explicit tool, useTools=false, streaming enabled.
+ */
+ @Test
+ @Order(14)
+ void shouldChatWithToolsAndStream() throws OllamaBaseException {
+ String theToolModel = TOOLS_MODEL;
+ api.pullModel(theToolModel);
+
+ OllamaChatRequestBuilder builder =
+ OllamaChatRequestBuilder.builder().withModel(theToolModel);
+
+ api.registerTool(EmployeeFinderToolSpec.getSpecification());
+
+ OllamaChatRequest requestModel =
+ builder.withMessage(
+ OllamaChatMessageRole.USER,
+ "Give me the ID and address of employee Rahul Kumar")
+ .withKeepAlive("0m")
+ .withOptions(new OptionsBuilder().setTemperature(0.9f).build())
+ .build();
+ requestModel.setUseTools(true);
+ OllamaChatResult chatResult = api.chat(requestModel, new ConsoleOutputChatTokenHandler());
+
+ assertNotNull(chatResult, "chatResult should not be null");
+ assertNotNull(chatResult.getResponseModel(), "Response model should not be null");
+ assertNotNull(
+ chatResult.getResponseModel().getMessage(), "Response message should not be null");
+ assertEquals(
+ OllamaChatMessageRole.ASSISTANT.getRoleName(),
+ chatResult.getResponseModel().getMessage().getRole().getRoleName(),
+ "Role of the response message should be ASSISTANT");
+ List<OllamaChatToolCalls> toolCalls =
+ chatResult.getResponseModel().getMessage().getToolCalls();
+ assertNotNull(toolCalls, "Tool calls should not be null");
+ }
+
+ /**
+ * Tests chat API with an annotated tool (single parameter).
+ *
+ * Scenario: Registers annotated tools, sends a user message that triggers a tool call, and
+ * verifies the tool call and arguments. Usage: chat, annotated tool, no thinking, no streaming.
+ */
+ @Test
+ @Order(12)
+ void shouldChatWithAnnotatedToolSingleParam() throws OllamaBaseException {
+ String theToolModel = TOOLS_MODEL;
+ api.pullModel(theToolModel);
+ OllamaChatRequestBuilder builder =
+ OllamaChatRequestBuilder.builder().withModel(theToolModel);
+
+ api.registerAnnotatedTools();
+
+ OllamaChatRequest requestModel =
+ builder.withMessage(
+ OllamaChatMessageRole.USER,
+ "Compute the most important constant in the world using 5 digits")
+ .build();
+ requestModel.setUseTools(true);
+ OllamaChatResult chatResult = api.chat(requestModel, null);
+ assertNotNull(chatResult);
+ assertNotNull(chatResult.getResponseModel());
+ assertNotNull(chatResult.getResponseModel().getMessage());
+ assertEquals(
+ OllamaChatMessageRole.ASSISTANT.getRoleName(),
+ chatResult.getResponseModel().getMessage().getRole().getRoleName());
+ List<OllamaChatToolCalls> toolCalls =
+ chatResult.getResponseModel().getMessage().getToolCalls();
+ assertNotNull(toolCalls, "Tool calls should not be null");
+ }
+
+ /**
+ * Tests chat API with an annotated tool (multiple parameters).
+ *
+ * Scenario: Registers annotated tools, sends a user message that may trigger a tool call
+ * with multiple arguments. Usage: chat, annotated tool, no thinking, no streaming, multiple
+ * parameters.
+ *
+ * Note: This test is non-deterministic due to model variability; some assertions are
+ * commented out.
+ */
+ @Test
+ @Order(13)
+ void shouldChatWithAnnotatedToolMultipleParams() throws OllamaBaseException {
+ String theToolModel = TOOLS_MODEL;
+ api.pullModel(theToolModel);
+ OllamaChatRequestBuilder builder =
+ OllamaChatRequestBuilder.builder().withModel(theToolModel);
+
+ api.registerAnnotatedTools(new AnnotatedTool());
+
+ OllamaChatRequest requestModel =
+ builder.withMessage(
+ OllamaChatMessageRole.USER,
+ "Greet Rahul with a lot of hearts and respond to me with the count of"
+ + " emojis that have been used in the greeting")
+ .build();
+
+ OllamaChatResult chatResult = api.chat(requestModel, null);
+ assertNotNull(chatResult);
+ assertNotNull(chatResult.getResponseModel());
+ assertNotNull(chatResult.getResponseModel().getMessage());
+ assertEquals(
+ OllamaChatMessageRole.ASSISTANT.getRoleName(),
+ chatResult.getResponseModel().getMessage().getRole().getRoleName());
+ }
+
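+ // For reference, a minimal sketch of what an annotated tool such as AnnotatedTool may
+ // look like, assuming ollama4j's @ToolSpec/@ToolProperty annotations (the method and
+ // parameter names below are illustrative, not the actual AnnotatedTool source):
+ //
+ //     public class AnnotatedTool {
+ //         @ToolSpec(desc = "Greets a person with a given number of heart emojis.")
+ //         public String greet(
+ //                 @ToolProperty(name = "name", desc = "Name of the person") String name,
+ //                 @ToolProperty(name = "hearts", desc = "Number of hearts") Integer hearts) {
+ //             return "Hello " + name + " " + "\u2764".repeat(hearts);
+ //         }
+ //     }
+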
+ /**
+ * Tests chat API with streaming enabled (no tools, no thinking).
+ *
+ * Scenario: Sends a user message and streams the assistant's response. Usage: chat, no
+ * tools, no thinking, streaming enabled.
+ */
+ @Test
+ @Order(15)
+ void shouldChatWithStream() throws OllamaBaseException {
+ api.deregisterTools();
+ api.pullModel(GENERAL_PURPOSE_MODEL);
+ OllamaChatRequestBuilder builder =
+ OllamaChatRequestBuilder.builder().withModel(GENERAL_PURPOSE_MODEL);
+ OllamaChatRequest requestModel =
+ builder.withMessage(
+ OllamaChatMessageRole.USER,
+ "What is the capital of France? And what's France's connection with"
+ + " Mona Lisa?")
+ .build();
+ requestModel.setThink(false);
+
+ OllamaChatResult chatResult = api.chat(requestModel, new ConsoleOutputChatTokenHandler());
+ assertNotNull(chatResult);
+ assertNotNull(chatResult.getResponseModel());
+ assertNotNull(chatResult.getResponseModel().getMessage());
+ assertNotNull(chatResult.getResponseModel().getMessage().getResponse());
+ }
+
+ /**
+ * Tests chat API with thinking and streaming enabled.
+ *
+ * Scenario: Sends a user message with thinking enabled and streams the assistant's response.
+ * Usage: chat, no tools, thinking enabled, streaming enabled.
+ */
+ @Test
+ @Order(15)
+ void shouldChatWithThinkingAndStream() throws OllamaBaseException {
+ api.pullModel(THINKING_TOOL_MODEL_2);
+ OllamaChatRequestBuilder builder =
+ OllamaChatRequestBuilder.builder().withModel(THINKING_TOOL_MODEL_2);
+ OllamaChatRequest requestModel =
+ builder.withMessage(
+ OllamaChatMessageRole.USER,
+ "What is the capital of France? And what's France's connection with"
+ + " Mona Lisa?")
+ .withThinking(true)
+ .withKeepAlive("0m")
+ .build();
+
+ OllamaChatResult chatResult = api.chat(requestModel, new ConsoleOutputChatTokenHandler());
+
+ assertNotNull(chatResult);
+ assertNotNull(chatResult.getResponseModel());
+ assertNotNull(chatResult.getResponseModel().getMessage());
+ assertNotNull(chatResult.getResponseModel().getMessage().getResponse());
+ }
+
+ /**
+ * Tests chat API with an image input from a URL.
+ *
+ * Scenario: Sends a user message with an image URL and verifies the assistant's response.
+ * Usage: chat, vision model, image from URL, no tools, no thinking, no streaming.
+ */
+ @Test
+ @Order(10)
+ void shouldChatWithImageFromURL()
+ throws OllamaBaseException, IOException, InterruptedException {
+ api.pullModel(VISION_MODEL);
+
+ OllamaChatRequestBuilder builder =
+ OllamaChatRequestBuilder.builder().withModel(VISION_MODEL);
+ OllamaChatRequest requestModel =
+ builder.withMessage(
+ OllamaChatMessageRole.USER,
+ "What's in the picture?",
+ Collections.emptyList(),
+ "https://t3.ftcdn.net/jpg/02/96/63/80/360_F_296638053_0gUVA4WVBKceGsIr7LNqRWSnkusi07dq.jpg")
+ .build();
+ api.registerAnnotatedTools(new OllamaAPIIntegrationTest());
+
+ OllamaChatResult chatResult = api.chat(requestModel, null);
+ assertNotNull(chatResult);
+ }
+
+ /**
+ * Tests chat API with an image input from a file and multi-turn history.
+ *
+ * Scenario: Sends a user message with an image file, then continues the conversation with
+ * chat history. Usage: chat, vision model, image from file, multi-turn, no tools, no thinking,
+ * no streaming.
+ */
+ @Test
+ @Order(10)
+ void shouldChatWithImageFromFileAndHistory() throws OllamaBaseException {
+ api.pullModel(VISION_MODEL);
+ OllamaChatRequestBuilder builder =
+ OllamaChatRequestBuilder.builder().withModel(VISION_MODEL);
+ OllamaChatRequest requestModel =
+ builder.withMessage(
+ OllamaChatMessageRole.USER,
+ "What's in the picture?",
+ Collections.emptyList(),
+ List.of(getImageFileFromClasspath("emoji-smile.jpeg")))
+ .build();
+
+ OllamaChatResult chatResult = api.chat(requestModel, null);
+ assertNotNull(chatResult);
+ assertNotNull(chatResult.getResponseModel());
+ builder.reset();
+
+ requestModel =
+ builder.withMessages(chatResult.getChatHistory())
+ .withMessage(OllamaChatMessageRole.USER, "What's the color?")
+ .build();
+
+ chatResult = api.chat(requestModel, null);
+ assertNotNull(chatResult);
+ assertNotNull(chatResult.getResponseModel());
+ }
+
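+ // getImageFileFromClasspath(...) used above is assumed to resolve a test resource to a
+ // java.io.File; a minimal sketch of such a helper:
+ //
+ //     private static File getImageFileFromClasspath(String fileName) {
+ //         ClassLoader loader = OllamaAPIIntegrationTest.class.getClassLoader();
+ //         return new File(Objects.requireNonNull(loader.getResource(fileName)).getFile());
+ //     }
+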
// /**
- // * Initializes the OllamaAPI instance for integration tests.
+ // * Tests generateWithImages using an image URL as input.
// *
- // * This method sets up the OllamaAPI client, either using an external Ollama host (if
- // * environment variables are set) or by starting a Testcontainers-based Ollama instance.
- // It also
- // * configures request timeout and model pull retry settings.
- // */
- // @BeforeAll
- // static void setUp() {
- // // ... (no javadoc needed for private setup logic)
- // int requestTimeoutSeconds = 60;
- // int numberOfRetriesForModelPull = 5;
- //
- // try {
- // String useExternalOllamaHostEnv = System.getenv("USE_EXTERNAL_OLLAMA_HOST");
- // String ollamaHostEnv = System.getenv("OLLAMA_HOST");
- //
- // boolean useExternalOllamaHost;
- // String ollamaHost;
- //
- // if (useExternalOllamaHostEnv == null && ollamaHostEnv == null) {
- // Properties props = new Properties();
- // try {
- // props.load(
- // OllamaAPIIntegrationTest.class
- // .getClassLoader()
- // .getResourceAsStream("test-config.properties"));
- // } catch (Exception e) {
- // throw new RuntimeException(
- // "Could not load test-config.properties from classpath", e);
- // }
- // useExternalOllamaHost =
- // Boolean.parseBoolean(
- // props.getProperty("USE_EXTERNAL_OLLAMA_HOST", "false"));
- // ollamaHost = props.getProperty("OLLAMA_HOST");
- // requestTimeoutSeconds =
- // Integer.parseInt(props.getProperty("REQUEST_TIMEOUT_SECONDS"));
- // numberOfRetriesForModelPull =
- // Integer.parseInt(props.getProperty("NUMBER_RETRIES_FOR_MODEL_PULL"));
- // } else {
- // useExternalOllamaHost = Boolean.parseBoolean(useExternalOllamaHostEnv);
- // ollamaHost = ollamaHostEnv;
- // }
- //
- // if (useExternalOllamaHost) {
- // LOG.info("Using external Ollama host: {}", ollamaHost);
- // api = new OllamaAPI(ollamaHost);
- // } else {
- // throw new RuntimeException(
- // "USE_EXTERNAL_OLLAMA_HOST is not set so, we will be using
- // Testcontainers"
- // + " Ollama host for the tests now. If you would like to use an
- // external"
- // + " host, please set the env var to USE_EXTERNAL_OLLAMA_HOST=true
- // and"
- // + " set the env var OLLAMA_HOST=http://localhost:11435 or a
- // different"
- // + " host/port.");
- // }
- // } catch (Exception e) {
- // String ollamaVersion = "0.6.1";
- // int internalPort = 11434;
- // int mappedPort = 11435;
- // ollama = new OllamaContainer("ollama/ollama:" + ollamaVersion);
- // ollama.addExposedPort(internalPort);
- // List<String> portBindings = new ArrayList<>();
- // portBindings.add(mappedPort + ":" + internalPort);
- // ollama.setPortBindings(portBindings);
- // ollama.start();
- // api = new OllamaAPI("http://" + ollama.getHost() + ":" + ollama.getMappedPort(internalPort));
- // }
- // api.setRequestTimeoutSeconds(requestTimeoutSeconds);
- // api.setNumberOfRetriesForModelPull(numberOfRetriesForModelPull);
- // }
- //
- // /**
- // * Tests that a connect exception is thrown for a wrong endpoint.
- // *
- // * Scenario: Ensures the API client fails gracefully when the Ollama server is
- // unreachable.
+ // * Scenario: Calls generateWithImages with a vision model and an image URL, expecting a
+ // * non-empty response. Usage: generateWithImages, image from URL, no streaming.
// */
// @Test
- // @Order(1)
- // void shouldThrowConnectExceptionForWrongEndpoint() {
- // OllamaAPI ollamaAPI = new OllamaAPI("http://wrong-host:11434");
- // assertThrows(OllamaBaseException.class, ollamaAPI::listModels);
- // }
+ // @Order(17)
+ // void shouldGenerateWithImageURLs()
+ // throws OllamaBaseException {
+ // api.pullModel(VISION_MODEL);
//
- // /**
- // * Tests retrieval of the Ollama server version.
- // *
- // * Scenario: Calls the /api/version endpoint and asserts a non-null version string is
- // * returned.
- // */
- // @Test
- // @Order(1)
- // void shouldReturnVersionFromVersionAPI() throws OllamaBaseException {
- // String version = api.getVersion();
- // assertNotNull(version);
- // }
+ // OllamaResult result =
+ // api.generateWithImages(
+ // VISION_MODEL,
+ // "What is in this image?",
+ // List.of(
//
- // /**
- // * Tests the /api/ping endpoint for server liveness.
- // *
- // * Scenario: Ensures the Ollama server responds to ping requests.
- // */
- // @Test
- // @Order(1)
- // void shouldPingSuccessfully() throws OllamaBaseException {
- // boolean pingResponse = api.ping();
- // assertTrue(pingResponse, "Ping should return true");
- // }
- //
- // /**
- // * Tests listing all available models from the Ollama server.
- // *
- // * Scenario: Calls /api/tags and verifies the returned list is not null (may be empty).
- // */
- // @Test
- // @Order(2)
- // void shouldListModels() throws OllamaBaseException {
- // List<Model> models = api.listModels();
- // assertNotNull(models, "Models should not be null");
- // }
- //
- // /**
- // * Tests pulling a model and verifying it appears in the model list.
- // *
- // * Scenario: Pulls an embedding model, then checks that it is present in the list of
- // models.
- // */
- // @Test
- // @Order(3)
- // void shouldPullModelAndListModels() throws OllamaBaseException {
- // api.pullModel(EMBEDDING_MODEL);
- // List<Model> models = api.listModels();
- // assertFalse(models.isEmpty(), "Models list should not be empty after pulling a model");
- // }
- //
- // /**
- // * Tests retrieving details of a specific model.
- // *
- // * Scenario: Pulls a model and retrieves its details, asserting the model file contains
- // the
- // * model name.
- // */
- // @Test
- // @Order(4)
- // void shouldGetModelDetails() throws OllamaBaseException {
- // api.pullModel(EMBEDDING_MODEL);
- // ModelDetail modelDetails = api.getModelDetails(EMBEDDING_MODEL);
- // assertNotNull(modelDetails);
- // assertTrue(modelDetails.getModelFile().contains(EMBEDDING_MODEL));
- // }
- //
- // /**
- // * Tests generating embeddings for a batch of input texts.
- // *
- // * Scenario: Uses the embedding model to generate vector embeddings for two input
- // sentences.
- // */
- // @Test
- // @Order(5)
- // void shouldReturnEmbeddings() throws Exception {
- // api.pullModel(EMBEDDING_MODEL);
- // OllamaEmbedRequestModel m = new OllamaEmbedRequestModel();
- // m.setModel(EMBEDDING_MODEL);
- // m.setInput(Arrays.asList("Why is the sky blue?", "Why is the grass green?"));
- // OllamaEmbedResponseModel embeddings = api.embed(m);
- // assertNotNull(embeddings, "Embeddings should not be null");
- // assertFalse(embeddings.getEmbeddings().isEmpty(), "Embeddings should not be empty");
- // }
- //
- // /**
- // * Tests generating structured output using the 'format' parameter.
- // *
- // * Scenario: Calls generateWithFormat with a prompt and a JSON schema, expecting a
- // structured
- // * response. Usage: generate with format, no thinking, no streaming.
- // */
- // @Test
- // @Order(6)
- // void shouldGenerateWithStructuredOutput() throws OllamaBaseException {
- // api.pullModel(TOOLS_MODEL);
- //
- // String prompt =
- // "The sun is shining brightly and is directly overhead at the zenith, casting
- // my"
- // + " shadow over my foot, so it must be noon.";
- //
- // Map<String, Object> format = new HashMap<>();
- // format.put("type", "object");
- // format.put("properties", Map.of("isNoon", Map.of("type", "boolean")));
- // format.put("required", List.of("isNoon"));
- //
- // OllamaResult result = api.generateWithFormat(TOOLS_MODEL, prompt, format);
- //
- // assertNotNull(result);
- // assertNotNull(result.getResponse());
- // assertFalse(result.getResponse().isEmpty());
- // }
- //
- // /**
- // * Tests basic text generation with default options.
- // *
- // * Scenario: Calls generate with a general-purpose model, no thinking, no streaming, no
- // * format. Usage: generate, raw=false, think=false, no streaming.
- // */
- // @Test
- // @Order(6)
- // void shouldGenerateWithDefaultOptions() throws OllamaBaseException {
- // api.pullModel(GENERAL_PURPOSE_MODEL);
- // boolean raw = false;
- // boolean thinking = false;
- // OllamaGenerateRequest request =
- // OllamaGenerateRequestBuilder.builder()
- // .withModel(GENERAL_PURPOSE_MODEL)
- // .withPrompt(
- // "What is the capital of France? And what's France's connection
- // with"
- // + " Mona Lisa?")
- // .withRaw(raw)
- // .withThink(thinking)
- // .withOptions(new OptionsBuilder().build())
- // .build();
- // OllamaGenerateStreamObserver handler = null;
- // OllamaResult result = api.generate(request, handler);
+ // "https://i.pinimg.com/736x/f9/4e/cb/f94ecba040696a3a20b484d2e15159ec.jpg"),
+ // new OptionsBuilder().build(),
+ // null,
+ // null);
// assertNotNull(result);
// assertNotNull(result.getResponse());
// assertFalse(result.getResponse().isEmpty());
// }
- //
+
+ /**
+ * Tests generateWithImages using an image file as input.
+ *
+ * Scenario: Calls generateWithImages with a vision model and an image file, expecting a
+ * non-empty response. Usage: generateWithImages, image from file, no streaming.
+ */
+ @Test
+ @Order(18)
+ void shouldGenerateWithImageFiles() throws OllamaBaseException {
+ api.pullModel(VISION_MODEL);
+ try {
+ OllamaGenerateRequest request =
+ OllamaGenerateRequestBuilder.builder()
+ .withModel(VISION_MODEL)
+ .withPrompt("What is in this image?")
+ .withRaw(false)
+ .withThink(false)
+ .withOptions(new OptionsBuilder().build())
+ .withImages(List.of(getImageFileFromClasspath("roses.jpg")))
+ .withFormat(null)
+ .withKeepAlive("0m")
+ .build();
+ OllamaGenerateStreamObserver handler = null;
+ OllamaResult result = api.generate(request, handler);
+ assertNotNull(result);
+ assertNotNull(result.getResponse());
+ assertFalse(result.getResponse().isEmpty());
+ } catch (OllamaBaseException e) {
+ fail(e);
+ } catch (IOException e) {
+ throw new RuntimeException(e);
+ }
+ }
+
+ /**
+ * Tests generateWithImages with image file input and streaming enabled.
+ *
+ * Scenario: Calls generateWithImages with a vision model, an image file, and a streaming
+ * handler for the response. Usage: generateWithImages, image from file, streaming enabled.
+ */
+ @Test
+ @Order(20)
+ void shouldGenerateWithImageFilesAndResponseStreamed() throws OllamaBaseException, IOException {
+ api.pullModel(VISION_MODEL);
+ OllamaGenerateRequest request =
+ OllamaGenerateRequestBuilder.builder()
+ .withModel(VISION_MODEL)
+ .withPrompt("What is in this image?")
+ .withRaw(false)
+ .withThink(false)
+ .withOptions(new OptionsBuilder().build())
+ .withImages(List.of(getImageFileFromClasspath("roses.jpg")))
+ .withFormat(null)
+ .withKeepAlive("0m")
+ .build();
+ OllamaGenerateStreamObserver handler =
+ new OllamaGenerateStreamObserver(
+ new ConsoleOutputGenerateTokenHandler(),
+ new ConsoleOutputGenerateTokenHandler());
+ OllamaResult result = api.generate(request, handler);
+ assertNotNull(result);
+ assertNotNull(result.getResponse());
+ assertFalse(result.getResponse().isEmpty());
+ }
+
+ /**
+ * Tests generate with thinking enabled (no streaming).
+ *
+ * Scenario: Calls generate with think=true, expecting both response and thinking fields to
+ * be populated. Usage: generate, think=true, no streaming.
+ */
+ @Test
+ @Order(20)
+ void shouldGenerateWithThinking() throws OllamaBaseException {
+ api.pullModel(THINKING_TOOL_MODEL);
+
+ boolean raw = false;
+ boolean think = true;
+
+ OllamaGenerateRequest request =
+ OllamaGenerateRequestBuilder.builder()
+ .withModel(THINKING_TOOL_MODEL)
+ .withPrompt("Who are you?")
+ .withRaw(raw)
+ .withThink(think)
+ .withOptions(new OptionsBuilder().build())
+ .withFormat(null)
+ .withKeepAlive("0m")
+ .build();
+ OllamaGenerateStreamObserver handler = new OllamaGenerateStreamObserver(null, null);
+
+ OllamaResult result = api.generate(request, handler);
+ assertNotNull(result);
+ assertNotNull(result.getResponse());
+ assertNotNull(result.getThinking());
+ }
+
+ /**
+ * Tests generate with thinking and streaming enabled.
+ *
+ * Scenario: Calls generate with think=true and a stream handler for both thinking and
+ * response tokens. Usage: generate, think=true, streaming enabled.
+ */
+ @Test
+ @Order(20)
+ void shouldGenerateWithThinkingAndStreamHandler() throws OllamaBaseException {
+ api.pullModel(THINKING_TOOL_MODEL);
+ boolean raw = false;
+ OllamaGenerateRequest request =
+ OllamaGenerateRequestBuilder.builder()
+ .withModel(THINKING_TOOL_MODEL)
+ .withPrompt("Who are you?")
+ .withRaw(raw)
+ .withThink(true)
+ .withOptions(new OptionsBuilder().build())
+ .withFormat(null)
+ .withKeepAlive("0m")
+ .build();
+ OllamaGenerateStreamObserver handler =
+ new OllamaGenerateStreamObserver(
+ thinkingToken -> {
+ LOG.info(thinkingToken.toUpperCase());
+ },
+ resToken -> {
+ LOG.info(resToken.toLowerCase());
+ });
+
+ OllamaResult result = api.generate(request, handler);
+ assertNotNull(result);
+ assertNotNull(result.getResponse());
+ assertNotNull(result.getThinking());
+ }
+
+ /**
+ * Tests generate with raw=true parameter.
+ *
+ * Scenario: Calls generate with raw=true, which sends the prompt as-is without any
+ * formatting. Usage: generate, raw=true, no thinking, no streaming.
+ */
+ @Test
+ @Order(21)
+ void shouldGenerateWithRawMode() throws OllamaBaseException {
+ api.pullModel(GENERAL_PURPOSE_MODEL);
+ api.unloadModel(GENERAL_PURPOSE_MODEL);
+ boolean raw = true;
+ boolean thinking = false;
+ OllamaGenerateRequest request =
+ OllamaGenerateRequestBuilder.builder()
+ .withModel(GENERAL_PURPOSE_MODEL)
+ .withPrompt("What is 2+2?")
+ .withRaw(raw)
+ .withThink(thinking)
+ .withOptions(new OptionsBuilder().build())
+ .withFormat(null)
+ .withKeepAlive("0m")
+ .build();
+ OllamaGenerateStreamObserver handler = new OllamaGenerateStreamObserver(null, null);
+ OllamaResult result = api.generate(request, handler);
+ assertNotNull(result);
+ assertNotNull(result.getResponse());
+ assertFalse(result.getResponse().isEmpty());
+ }
+
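+ // As the commented-out raw+thinking variant further below notes, raw=true skips prompt
+ // templating entirely, so it is typically paired with a fully templated prompt, e.g.
+ // (template tokens are model-specific and illustrative here):
+ //
+ //     .withPrompt("<|user|>What is 2+2?<|assistant|>")
+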
+ /**
+ * Tests generate with raw=true and streaming enabled.
+ *
+ * Scenario: Calls generate with raw=true and streams the response. Usage: generate,
+ * raw=true, no thinking, streaming enabled.
+ */
+ @Test
+ @Order(22)
+ void shouldGenerateWithRawModeAndStreaming() throws OllamaBaseException {
+ api.pullModel(GENERAL_PURPOSE_MODEL);
+ boolean raw = true;
+ OllamaGenerateRequest request =
+ OllamaGenerateRequestBuilder.builder()
+ .withModel(GENERAL_PURPOSE_MODEL)
+ .withPrompt("What is the largest planet in our solar system?")
+ .withRaw(raw)
+ .withThink(false)
+ .withOptions(new OptionsBuilder().build())
+ .withFormat(null)
+ .withKeepAlive("0m")
+ .build();
+ OllamaGenerateStreamObserver handler =
+ new OllamaGenerateStreamObserver(null, new ConsoleOutputGenerateTokenHandler());
+ OllamaResult result = api.generate(request, handler);
+
+ assertNotNull(result);
+ assertNotNull(result.getResponse());
+ assertFalse(result.getResponse().isEmpty());
+ }
+
// /**
- // * Tests text generation with streaming enabled.
+ // * Tests generate with raw=true and thinking enabled.
// *
- // * Scenario: Calls generate with a general-purpose model, streaming the response
- // tokens.
- // * Usage: generate, raw=false, think=false, streaming enabled.
+ // * Scenario: Calls generate with raw=true and think=true combination. Usage: generate,
+ // * raw=true, thinking enabled, no streaming.
// */
// @Test
- // @Order(7)
- // void shouldGenerateWithDefaultOptionsStreamed() throws OllamaBaseException {
- // api.pullModel(GENERAL_PURPOSE_MODEL);
- // boolean raw = false;
- // OllamaGenerateRequest request =
- // OllamaGenerateRequestBuilder.builder()
- // .withModel(GENERAL_PURPOSE_MODEL)
- // .withPrompt(
- // "What is the capital of France? And what's France's connection
- // with"
- // + " Mona Lisa?")
- // .withRaw(raw)
- // .withThink(false)
- // .withOptions(new OptionsBuilder().build())
- // .build();
- // OllamaGenerateStreamObserver handler = null;
+ // @Order(23)
+ // void shouldGenerateWithRawModeAndThinking()
+ // throws OllamaBaseException
+ // {
+ // api.pullModel(THINKING_TOOL_MODEL_2);
+ // api.unloadModel(THINKING_TOOL_MODEL_2);
+ // boolean raw =
+ // true; // if true no formatting will be applied to the prompt. You may choose
+ // to use
+ // // the raw parameter if you are specifying a full templated prompt in your
+ // // request to the API
+ // boolean thinking = true;
// OllamaResult result =
// api.generate(
- // request,
- // new OllamaGenerateStreamObserver(
- // null, new ConsoleOutputGenerateTokenHandler()));
- // assertNotNull(result);
- // assertNotNull(result.getResponse());
- // assertFalse(result.getResponse().isEmpty());
- // }
- //
- // /**
- // * Tests chat API with custom options (e.g., temperature).
- // *
- // * Scenario: Builds a chat request with system and user messages, sets a custom
- // temperature,
- // * and verifies the response. Usage: chat, no tools, no thinking, no streaming, custom
- // options.
- // */
- // @Test
- // @Order(8)
- // void shouldGenerateWithCustomOptions() throws OllamaBaseException {
- // api.pullModel(GENERAL_PURPOSE_MODEL);
- //
- // OllamaChatRequestBuilder builder =
- // OllamaChatRequestBuilder.builder().withModel(GENERAL_PURPOSE_MODEL);
- // OllamaChatRequest requestModel =
- // builder.withMessage(
- // OllamaChatMessageRole.SYSTEM,
- // "You are a helpful assistant who can generate random person's
- // first"
- // + " and last names in the format [First name, Last
- // name].")
- // .build();
- // requestModel =
- // builder.withMessages(requestModel.getMessages())
- // .withMessage(OllamaChatMessageRole.USER, "Give me a cool name")
- // .withOptions(new OptionsBuilder().setTemperature(0.5f).build())
- // .build();
- // OllamaChatResult chatResult = api.chat(requestModel, null);
- //
- // assertNotNull(chatResult);
- // assertNotNull(chatResult.getResponseModel());
- // assertFalse(chatResult.getResponseModel().getMessage().getResponse().isEmpty());
- // }
- //
- // /**
- // * Tests chat API with a system prompt and verifies the assistant's response.
- // *
- // * Scenario: Sends a system prompt instructing the assistant to reply with a specific
- // word,
- // * then checks the response. Usage: chat, no tools, no thinking, no streaming, system
- // prompt.
- // */
- // @Test
- // @Order(9)
- // void shouldChatWithSystemPrompt() throws OllamaBaseException {
- // api.pullModel(GENERAL_PURPOSE_MODEL);
- //
- // String expectedResponse = "Bhai";
- //
- // OllamaChatRequestBuilder builder =
- // OllamaChatRequestBuilder.builder().withModel(GENERAL_PURPOSE_MODEL);
- // OllamaChatRequest requestModel =
- // builder.withMessage(
- // OllamaChatMessageRole.SYSTEM,
- // String.format(
- // "[INSTRUCTION-START] You are an obidient and helpful
- // bot"
- // + " named %s. You always answer with only one word
- // and"
- // + " that word is your name. [INSTRUCTION-END]",
- // expectedResponse))
- // .withMessage(OllamaChatMessageRole.USER, "Who are you?")
- // .withOptions(new OptionsBuilder().setTemperature(0.0f).build())
- // .build();
- //
- // OllamaChatResult chatResult = api.chat(requestModel, null);
- // assertNotNull(chatResult);
- // assertNotNull(chatResult.getResponseModel());
- // assertNotNull(chatResult.getResponseModel().getMessage());
- // assertFalse(chatResult.getResponseModel().getMessage().getResponse().isBlank());
- // assertTrue(
- // chatResult
- // .getResponseModel()
- // .getMessage()
- // .getResponse()
- // .contains(expectedResponse));
- // assertEquals(3, chatResult.getChatHistory().size());
- // }
- //
- // /**
- // * Tests chat API with multi-turn conversation (chat history).
- // *
- // * Scenario: Sends a sequence of user messages, each time including the chat history,
- // and
- // * verifies the assistant's responses. Usage: chat, no tools, no thinking, no streaming,
- // * multi-turn.
- // */
- // @Test
- // @Order(10)
- // void shouldChatWithHistory() throws Exception {
- // api.pullModel(THINKING_TOOL_MODEL);
- // OllamaChatRequestBuilder builder =
- // OllamaChatRequestBuilder.builder().withModel(THINKING_TOOL_MODEL);
- //
- // OllamaChatRequest requestModel =
- // builder.withMessage(
- // OllamaChatMessageRole.USER, "What is 1+1? Answer only in
- // numbers.")
- // .build();
- //
- // OllamaChatResult chatResult = api.chat(requestModel, null);
- //
- // assertNotNull(chatResult);
- // assertNotNull(chatResult.getChatHistory());
- // assertNotNull(chatResult.getChatHistory().stream());
- //
- // requestModel =
- // builder.withMessages(chatResult.getChatHistory())
- // .withMessage(OllamaChatMessageRole.USER, "And what is its squared
- // value?")
- // .build();
- //
- // chatResult = api.chat(requestModel, null);
- //
- // assertNotNull(chatResult);
- // assertNotNull(chatResult.getChatHistory());
- // assertNotNull(chatResult.getChatHistory().stream());
- //
- // requestModel =
- // builder.withMessages(chatResult.getChatHistory())
- // .withMessage(
- // OllamaChatMessageRole.USER,
- // "What is the largest value between 2, 4 and 6?")
- // .build();
- //
- // chatResult = api.chat(requestModel, null);
- //
- // assertNotNull(chatResult, "Chat result should not be null");
- // assertTrue(
- // chatResult.getChatHistory().size() > 2,
- // "Chat history should contain more than two messages");
- // }
- //
- // /**
- // * Tests chat API with explicit tool invocation (client does not handle tools).
- // *
- // * Scenario: Registers a tool, sends a user message that triggers a tool call, and
- // verifies
- // * the tool call and arguments. Usage: chat, explicit tool, useTools=false, no thinking,
- // no
- // * streaming.
- // */
- // @Test
- // @Order(11)
- // void shouldChatWithExplicitTool() throws OllamaBaseException {
- // String theToolModel = TOOLS_MODEL;
- // api.pullModel(theToolModel);
- // OllamaChatRequestBuilder builder =
- // OllamaChatRequestBuilder.builder().withModel(theToolModel);
- //
- // api.registerTool(employeeFinderTool());
- //
- // OllamaChatRequest requestModel =
- // builder.withMessage(
- // OllamaChatMessageRole.USER,
- // "Give me the ID and address of the employee Rahul Kumar.")
- // .build();
- // requestModel.setOptions(new
- // OptionsBuilder().setTemperature(0.9f).build().getOptionsMap());
- // requestModel.setUseTools(true);
- // OllamaChatResult chatResult = api.chat(requestModel, null);
- //
- // assertNotNull(chatResult, "chatResult should not be null");
- // assertNotNull(chatResult.getResponseModel(), "Response model should not be null");
- // assertNotNull(
- // chatResult.getResponseModel().getMessage(), "Response message should not be
- // null");
- // assertEquals(
- // OllamaChatMessageRole.ASSISTANT.getRoleName(),
- // chatResult.getResponseModel().getMessage().getRole().getRoleName(),
- // "Role of the response message should be ASSISTANT");
- // List<OllamaChatToolCalls> toolCalls =
- // chatResult.getResponseModel().getMessage().getToolCalls();
- // assertNotNull(toolCalls, "Tool calls should not be null");
- // }
- //
- // /**
- // * Tests chat API with explicit tool invocation and useTools enabled.
- // *
- // * Scenario: Registers a tool, enables useTools, sends a user message, and verifies the
- // * assistant's tool call. Usage: chat, explicit tool, useTools=true, no thinking, no
- // streaming.
- // */
- // @Test
- // @Order(13)
- // void shouldChatWithExplicitToolAndUseTools() throws OllamaBaseException {
- // String theToolModel = TOOLS_MODEL;
- // api.pullModel(theToolModel);
- // OllamaChatRequestBuilder builder =
- // OllamaChatRequestBuilder.builder().withModel(theToolModel);
- //
- // api.registerTool(employeeFinderTool());
- //
- // OllamaChatRequest requestModel =
- // builder.withMessage(
- // OllamaChatMessageRole.USER,
- // "Give me the ID and address of the employee Rahul Kumar.")
- // .build();
- // requestModel.setOptions(new
- // OptionsBuilder().setTemperature(0.9f).build().getOptionsMap());
- // requestModel.setUseTools(true);
- // OllamaChatResult chatResult = api.chat(requestModel, null);
- //
- // assertNotNull(chatResult, "chatResult should not be null");
- // assertNotNull(chatResult.getResponseModel(), "Response model should not be null");
- // assertNotNull(
- // chatResult.getResponseModel().getMessage(), "Response message should not be
- // null");
- // assertEquals(
- // OllamaChatMessageRole.ASSISTANT.getRoleName(),
- // chatResult.getResponseModel().getMessage().getRole().getRoleName(),
- // "Role of the response message should be ASSISTANT");
- //
- // boolean toolCalled = false;
- // List<OllamaChatMessage> messages = chatResult.getChatHistory();
- // for (OllamaChatMessage message : messages) {
- // if (message.getToolCalls() != null && !message.getToolCalls().isEmpty()) {
- // toolCalled = true;
- // }
- // }
- // assertTrue(toolCalled, "The assistant should have invoked the registered tool");
- // }
- //
- // /**
- // * Tests chat API with tools and streaming enabled.
- // *
- // * Scenario: Registers a tool, sends a user message, and streams the assistant's
- // response
- // * (with tool call). Usage: chat, explicit tool, useTools=false, streaming enabled.
- // */
- // @Test
- // @Order(14)
- // void shouldChatWithToolsAndStream() throws OllamaBaseException {
- // String theToolModel = TOOLS_MODEL;
- // api.pullModel(theToolModel);
- //
- // OllamaChatRequestBuilder builder =
- // OllamaChatRequestBuilder.builder().withModel(theToolModel);
- //
- // api.registerTool(employeeFinderTool());
- //
- // OllamaChatRequest requestModel =
- // builder.withMessage(
- // OllamaChatMessageRole.USER,
- // "Give me the ID and address of employee Rahul Kumar")
- // .withKeepAlive("0m")
- // .withOptions(new OptionsBuilder().setTemperature(0.9f).build())
- // .build();
- // requestModel.setUseTools(true);
- // OllamaChatResult chatResult = api.chat(requestModel, new
- // ConsoleOutputChatTokenHandler());
- //
- // assertNotNull(chatResult, "chatResult should not be null");
- // assertNotNull(chatResult.getResponseModel(), "Response model should not be null");
- // assertNotNull(
- // chatResult.getResponseModel().getMessage(), "Response message should not be
- // null");
- // assertEquals(
- // OllamaChatMessageRole.ASSISTANT.getRoleName(),
- // chatResult.getResponseModel().getMessage().getRole().getRoleName(),
- // "Role of the response message should be ASSISTANT");
- // List<OllamaChatToolCalls> toolCalls =
- // chatResult.getResponseModel().getMessage().getToolCalls();
- // assertNotNull(toolCalls, "Tool calls should not be null");
- // }
- //
- // /**
- // * Tests chat API with an annotated tool (single parameter).
- // *
- // * Scenario: Registers annotated tools, sends a user message that triggers a tool call,
- // and
- // * verifies the tool call and arguments. Usage: chat, annotated tool, no thinking, no
- // streaming.
- // */
- // @Test
- // @Order(12)
- // void shouldChatWithAnnotatedToolSingleParam() throws OllamaBaseException {
- // String theToolModel = TOOLS_MODEL;
- // api.pullModel(theToolModel);
- // OllamaChatRequestBuilder builder =
- // OllamaChatRequestBuilder.builder().withModel(theToolModel);
- //
- // api.registerAnnotatedTools();
- //
- // OllamaChatRequest requestModel =
- // builder.withMessage(
- // OllamaChatMessageRole.USER,
- // "Compute the most important constant in the world using 5
- // digits")
- // .build();
- // requestModel.setUseTools(true);
- // OllamaChatResult chatResult = api.chat(requestModel, null);
- // assertNotNull(chatResult);
- // assertNotNull(chatResult.getResponseModel());
- // assertNotNull(chatResult.getResponseModel().getMessage());
- // assertEquals(
- // OllamaChatMessageRole.ASSISTANT.getRoleName(),
- // chatResult.getResponseModel().getMessage().getRole().getRoleName());
- // List<OllamaChatToolCalls> toolCalls =
- // chatResult.getResponseModel().getMessage().getToolCalls();
- // assertNotNull(toolCalls, "Tool calls should not be null");
- // }
- //
- // /**
- // * Tests chat API with an annotated tool (multiple parameters).
- // *
- // * Scenario: Registers annotated tools, sends a user message that may trigger a tool
- // call
- // * with multiple arguments. Usage: chat, annotated tool, no thinking, no streaming,
- // multiple
- // * parameters.
- // *
- // * Note: This test is non-deterministic due to model variability; some assertions are
- // * commented out.
- // */
- // @Test
- // @Order(13)
- // void shouldChatWithAnnotatedToolMultipleParams() throws OllamaBaseException {
- // String theToolModel = TOOLS_MODEL;
- // api.pullModel(theToolModel);
- // OllamaChatRequestBuilder builder =
- // OllamaChatRequestBuilder.builder().withModel(theToolModel);
- //
- // api.registerAnnotatedTools(new AnnotatedTool());
- //
- // OllamaChatRequest requestModel =
- // builder.withMessage(
- // OllamaChatMessageRole.USER,
- // "Greet Rahul with a lot of hearts and respond to me with count
- // of"
- // + " emojis that have been in used in the greeting")
- // .build();
- //
- // OllamaChatResult chatResult = api.chat(requestModel, null);
- // assertNotNull(chatResult);
- // assertNotNull(chatResult.getResponseModel());
- // assertNotNull(chatResult.getResponseModel().getMessage());
- // assertEquals(
- // OllamaChatMessageRole.ASSISTANT.getRoleName(),
- // chatResult.getResponseModel().getMessage().getRole().getRoleName());
- // }
- //
- // /**
- // * Tests chat API with streaming enabled (no tools, no thinking).
- // *
- // * Scenario: Sends a user message and streams the assistant's response. Usage: chat, no
- // * tools, no thinking, streaming enabled.
- // */
- // @Test
- // @Order(15)
- // void shouldChatWithStream() throws OllamaBaseException {
- // api.deregisterTools();
- // api.pullModel(GENERAL_PURPOSE_MODEL);
- // OllamaChatRequestBuilder builder =
- // OllamaChatRequestBuilder.builder().withModel(GENERAL_PURPOSE_MODEL);
- // OllamaChatRequest requestModel =
- // builder.withMessage(
- // OllamaChatMessageRole.USER,
- // "What is the capital of France? And what's France's connection
- // with"
- // + " Mona Lisa?")
- // .build();
- // requestModel.setThink(false);
- //
- // OllamaChatResult chatResult = api.chat(requestModel, new
- // ConsoleOutputChatTokenHandler());
- // assertNotNull(chatResult);
- // assertNotNull(chatResult.getResponseModel());
- // assertNotNull(chatResult.getResponseModel().getMessage());
- // assertNotNull(chatResult.getResponseModel().getMessage().getResponse());
- // }
- //
- // /**
- // * Tests chat API with thinking and streaming enabled.
- // *
- // * Scenario: Sends a user message with thinking enabled and streams the assistant's
- // response.
- // * Usage: chat, no tools, thinking enabled, streaming enabled.
- // */
- // @Test
- // @Order(15)
- // void shouldChatWithThinkingAndStream() throws OllamaBaseException {
- // api.pullModel(THINKING_TOOL_MODEL_2);
- // OllamaChatRequestBuilder builder =
- // OllamaChatRequestBuilder.builder().withModel(THINKING_TOOL_MODEL_2);
- // OllamaChatRequest requestModel =
- // builder.withMessage(
- // OllamaChatMessageRole.USER,
- // "What is the capital of France? And what's France's connection
- // with"
- // + " Mona Lisa?")
- // .withThinking(true)
- // .withKeepAlive("0m")
- // .build();
- //
- // OllamaChatResult chatResult = api.chat(requestModel, new
- // ConsoleOutputChatTokenHandler());
- //
- // assertNotNull(chatResult);
- // assertNotNull(chatResult.getResponseModel());
- // assertNotNull(chatResult.getResponseModel().getMessage());
- // assertNotNull(chatResult.getResponseModel().getMessage().getResponse());
- // }
- //
- // /**
- // * Tests chat API with an image input from a URL.
- // *
- // * Scenario: Sends a user message with an image URL and verifies the assistant's
- // response.
- // * Usage: chat, vision model, image from URL, no tools, no thinking, no streaming.
- // */
- // @Test
- // @Order(10)
- // void shouldChatWithImageFromURL()
- // throws OllamaBaseException, IOException, InterruptedException {
- // api.pullModel(VISION_MODEL);
- //
- // OllamaChatRequestBuilder builder =
- // OllamaChatRequestBuilder.builder().withModel(VISION_MODEL);
- // OllamaChatRequest requestModel =
- // builder.withMessage(
- // OllamaChatMessageRole.USER,
- // "What's in the picture?",
- // Collections.emptyList(),
- //
- // "https://t3.ftcdn.net/jpg/02/96/63/80/360_F_296638053_0gUVA4WVBKceGsIr7LNqRWSnkusi07dq.jpg")
- // .build();
- // api.registerAnnotatedTools(new OllamaAPIIntegrationTest());
- //
- // OllamaChatResult chatResult = api.chat(requestModel, null);
- // assertNotNull(chatResult);
- // }
- //
- // /**
- // * Tests chat API with an image input from a file and multi-turn history.
- // *
- // * Scenario: Sends a user message with an image file, then continues the conversation
- // with
- // * chat history. Usage: chat, vision model, image from file, multi-turn, no tools, no
- // thinking,
- // * no streaming.
- // */
- // @Test
- // @Order(10)
- // void shouldChatWithImageFromFileAndHistory() throws OllamaBaseException {
- // api.pullModel(VISION_MODEL);
- // OllamaChatRequestBuilder builder =
- // OllamaChatRequestBuilder.builder().withModel(VISION_MODEL);
- // OllamaChatRequest requestModel =
- // builder.withMessage(
- // OllamaChatMessageRole.USER,
- // "What's in the picture?",
- // Collections.emptyList(),
- // List.of(getImageFileFromClasspath("emoji-smile.jpeg")))
- // .build();
- //
- // OllamaChatResult chatResult = api.chat(requestModel, null);
- // assertNotNull(chatResult);
- // assertNotNull(chatResult.getResponseModel());
- // builder.reset();
- //
- // requestModel =
- // builder.withMessages(chatResult.getChatHistory())
- // .withMessage(OllamaChatMessageRole.USER, "What's the color?")
- // .build();
- //
- // chatResult = api.chat(requestModel, null);
- // assertNotNull(chatResult);
- // assertNotNull(chatResult.getResponseModel());
- // }
- //
- // // /**
- // // * Tests generateWithImages using an image URL as input.
- // // *
- // // * Scenario: Calls generateWithImages with a vision model and an image URL,
- // expecting a
- // // * non-empty response. Usage: generateWithImages, image from URL, no streaming.
- // // */
- // // @Test
- // // @Order(17)
- // // void shouldGenerateWithImageURLs()
- // // throws OllamaBaseException {
- // // api.pullModel(VISION_MODEL);
- // //
- // // OllamaResult result =
- // // api.generateWithImages(
- // // VISION_MODEL,
- // // "What is in this image?",
- // // List.of(
- // //
- // // "https://i.pinimg.com/736x/f9/4e/cb/f94ecba040696a3a20b484d2e15159ec.jpg"),
- // // new OptionsBuilder().build(),
- // // null,
- // // null);
- // // assertNotNull(result);
- // // assertNotNull(result.getResponse());
- // // assertFalse(result.getResponse().isEmpty());
- // // }
- //
- // /**
- // * Tests generateWithImages using an image file as input.
- // *
- // * Scenario: Calls generateWithImages with a vision model and an image file, expecting
- // a
- // * non-empty response. Usage: generateWithImages, image from file, no streaming.
- // */
- // @Test
- // @Order(18)
- // void shouldGenerateWithImageFiles() throws OllamaBaseException {
- // api.pullModel(VISION_MODEL);
- // try {
- // OllamaGenerateRequest request =
- // OllamaGenerateRequestBuilder.builder()
- // .withModel(VISION_MODEL)
- // .withPrompt("What is in this image?")
- // .withRaw(false)
- // .withThink(false)
- // .withOptions(new OptionsBuilder().build())
- // .withImages(List.of(getImageFileFromClasspath("roses.jpg")))
- // .withFormat(null)
- // .withKeepAlive("0m")
- // .build();
- // OllamaGenerateStreamObserver handler = null;
- // OllamaResult result = api.generate(request, handler);
- // assertNotNull(result);
- // assertNotNull(result.getResponse());
- // assertFalse(result.getResponse().isEmpty());
- // } catch (OllamaBaseException e) {
- // fail(e);
- // } catch (IOException e) {
- // throw new RuntimeException(e);
- // }
- // }
- //
- // /**
- // * Tests generateWithImages with image file input and streaming enabled.
- // *
- // * Scenario: Calls generateWithImages with a vision model, an image file, and a
- // streaming
- // * handler for the response. Usage: generateWithImages, image from file, streaming
- // enabled.
- // */
- // @Test
- // @Order(20)
- // void shouldGenerateWithImageFilesAndResponseStreamed() throws OllamaBaseException,
- // IOException {
- // api.pullModel(VISION_MODEL);
- // OllamaGenerateRequest request =
- // OllamaGenerateRequestBuilder.builder()
- // .withModel(VISION_MODEL)
- // .withPrompt("What is in this image?")
- // .withRaw(false)
- // .withThink(false)
- // .withOptions(new OptionsBuilder().build())
- // .withImages(List.of(getImageFileFromClasspath("roses.jpg")))
- // .withFormat(null)
- // .withKeepAlive("0m")
- // .build();
- // OllamaGenerateStreamObserver handler =
- // new OllamaGenerateStreamObserver(
- // new ConsoleOutputGenerateTokenHandler(),
- // new ConsoleOutputGenerateTokenHandler());
- // OllamaResult result = api.generate(request, handler);
- // assertNotNull(result);
- // assertNotNull(result.getResponse());
- // assertFalse(result.getResponse().isEmpty());
- // }
- //
- // /**
- // * Tests generate with thinking enabled (no streaming).
- // *
- // * Scenario: Calls generate with think=true, expecting both response and thinking
- // fields to
- // * be populated. Usage: generate, think=true, no streaming.
- // */
- // @Test
- // @Order(20)
- // void shouldGenerateWithThinking() throws OllamaBaseException {
- // api.pullModel(THINKING_TOOL_MODEL);
- //
- // boolean raw = false;
- // boolean think = true;
- //
- // OllamaGenerateRequest request =
- // OllamaGenerateRequestBuilder.builder()
- // .withModel(THINKING_TOOL_MODEL)
- // .withPrompt("Who are you?")
- // .withRaw(raw)
- // .withThink(think)
- // .withOptions(new OptionsBuilder().build())
- // .withFormat(null)
- // .withKeepAlive("0m")
- // .build();
- // OllamaGenerateStreamObserver handler = new OllamaGenerateStreamObserver(null, null);
- //
- // OllamaResult result = api.generate(request, handler);
+ // THINKING_TOOL_MODEL_2,
+ // "Validate: 1+1=2",
+ // raw,
+ // thinking,
+ // new OptionsBuilder().build(),
+ // new OllamaGenerateStreamObserver(null, null));
// assertNotNull(result);
// assertNotNull(result.getResponse());
// assertNotNull(result.getThinking());
// }
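The added lines above call `api.generate(...)` positionally, while the removed tests build an `OllamaGenerateRequest` first. For comparison, a sketch of the same call expressed through the builder pattern used throughout this diff (field names are taken from that pattern, not verified against any particular library version):

```java
// Sketch: builder-style equivalent of the positional generate(...) call above,
// mirroring the request-builder pattern visible in the removed tests.
OllamaGenerateRequest request =
        OllamaGenerateRequestBuilder.builder()
                .withModel(THINKING_TOOL_MODEL_2)
                .withPrompt("Validate: 1+1=2")
                .withRaw(raw)
                .withThink(thinking)
                .withOptions(new OptionsBuilder().build())
                .build();
OllamaResult result = api.generate(request, new OllamaGenerateStreamObserver(null, null));
```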
- //
- // /**
- // * Tests generate with thinking and streaming enabled.
- // *
- // * Scenario: Calls generate with think=true and a stream handler for both thinking and
- // * response tokens. Usage: generate, think=true, streaming enabled.
- // */
- // @Test
- // @Order(20)
- // void shouldGenerateWithThinkingAndStreamHandler() throws OllamaBaseException {
- // api.pullModel(THINKING_TOOL_MODEL);
- // boolean raw = false;
- // OllamaGenerateRequest request =
- // OllamaGenerateRequestBuilder.builder()
- // .withModel(THINKING_TOOL_MODEL)
- // .withPrompt("Who are you?")
- // .withRaw(raw)
- // .withThink(true)
- // .withOptions(new OptionsBuilder().build())
- // .withFormat(null)
- // .withKeepAlive("0m")
- // .build();
- // OllamaGenerateStreamObserver handler =
- // new OllamaGenerateStreamObserver(
- // thinkingToken -> {
- // LOG.info(thinkingToken.toUpperCase());
- // },
- // resToken -> {
- // LOG.info(resToken.toLowerCase());
- // });
- //
- // OllamaResult result = api.generate(request, handler);
- // assertNotNull(result);
- // assertNotNull(result.getResponse());
- // assertNotNull(result.getThinking());
- // }
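The handler above logs each token as it arrives. If a test instead needs the full streamed text, the same observer can accumulate tokens; a minimal sketch, assuming the two handlers receive plain `String` tokens as the lambdas above suggest:

```java
// Sketch: collect streamed thinking and response tokens instead of logging them.
StringBuilder thinkingText = new StringBuilder();
StringBuilder responseText = new StringBuilder();
OllamaGenerateStreamObserver collectingHandler =
        new OllamaGenerateStreamObserver(thinkingText::append, responseText::append);
OllamaResult result = api.generate(request, collectingHandler);
// thinkingText and responseText now hold the concatenated token streams.
```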
- //
- // /**
- // * Tests generate with raw=true parameter.
- // *
- // * Scenario: Calls generate with raw=true, which sends the prompt as-is without any
- // * formatting. Usage: generate, raw=true, no thinking, no streaming.
- // */
- // @Test
- // @Order(21)
- // void shouldGenerateWithRawMode() throws OllamaBaseException {
- // api.pullModel(GENERAL_PURPOSE_MODEL);
- // api.unloadModel(GENERAL_PURPOSE_MODEL);
- // boolean raw = true;
- // boolean thinking = false;
- // OllamaGenerateRequest request =
- // OllamaGenerateRequestBuilder.builder()
- // .withModel(GENERAL_PURPOSE_MODEL)
- // .withPrompt("What is 2+2?")
- // .withRaw(raw)
- // .withThink(thinking)
- // .withOptions(new OptionsBuilder().build())
- // .withFormat(null)
- // .withKeepAlive("0m")
- // .build();
- // OllamaGenerateStreamObserver handler = new OllamaGenerateStreamObserver(null, null);
- // OllamaResult result = api.generate(request, handler);
- // assertNotNull(result);
- // assertNotNull(result.getResponse());
- // assertFalse(result.getResponse().isEmpty());
- // }
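Because raw=true bypasses the model's prompt template, the caller owns all chat formatting. A sketch of that responsibility (the template markers below are illustrative placeholders; real markers depend on the model in use):

```java
// Sketch: with raw=true the prompt is sent verbatim, so any chat-template
// markers must be supplied by the caller. <|user|>/<|assistant|> here are
// placeholders, not the markers of any specific model.
OllamaGenerateRequest rawRequest =
        OllamaGenerateRequestBuilder.builder()
                .withModel(GENERAL_PURPOSE_MODEL)
                .withPrompt("<|user|>\nWhat is 2+2?\n<|assistant|>\n")
                .withRaw(true)
                .withThink(false)
                .withOptions(new OptionsBuilder().build())
                .build();
```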
- //
- // /**
- // * Tests generate with raw=true and streaming enabled.
- // *
- // * Scenario: Calls generate with raw=true and streams the response. Usage: generate,
- // * raw=true, no thinking, streaming enabled.
- // */
- // @Test
- // @Order(22)
- // void shouldGenerateWithRawModeAndStreaming() throws OllamaBaseException {
- // api.pullModel(GENERAL_PURPOSE_MODEL);
- // boolean raw = true;
- // OllamaGenerateRequest request =
- // OllamaGenerateRequestBuilder.builder()
- // .withModel(GENERAL_PURPOSE_MODEL)
- // .withPrompt("What is the largest planet in our solar system?")
- // .withRaw(raw)
- // .withThink(false)
- // .withOptions(new OptionsBuilder().build())
- // .withFormat(null)
- // .withKeepAlive("0m")
- // .build();
- // OllamaGenerateStreamObserver handler =
- // new OllamaGenerateStreamObserver(null, new ConsoleOutputGenerateTokenHandler());
- // OllamaResult result = api.generate(request, handler);
- //
- // assertNotNull(result);
- // assertNotNull(result.getResponse());
- // assertFalse(result.getResponse().isEmpty());
- // }
- //
- // // /**
- // // * Tests generate with raw=true and thinking enabled.
- // // *
- // // * Scenario: Calls generate with raw=true and think=true combination. Usage: generate,
- // // * raw=true, thinking enabled, no streaming.
- // // */
- // // @Test
- // // @Order(23)
- // // void shouldGenerateWithRawModeAndThinking()
- // // throws OllamaBaseException
- // // {
- // // api.pullModel(THINKING_TOOL_MODEL_2);
- // // api.unloadModel(THINKING_TOOL_MODEL_2);
- // // boolean raw = true; // if true no formatting will be applied to the prompt. You may
- // // // choose to use the raw parameter if you are specifying a full templated prompt in
- // // // your request to the API
- // // boolean thinking = true;
- // // OllamaResult result =
- // // api.generate(
- // // THINKING_TOOL_MODEL_2,
- // // "Validate: 1+1=2",
- // // raw,
- // // thinking,
- // // new OptionsBuilder().build(),
- // // new OllamaGenerateStreamObserver(null, null));
- // // assertNotNull(result);
- // // assertNotNull(result.getResponse());
- // // assertNotNull(result.getThinking());
- // // }
- //
- // /**
- // * Tests generate with all parameters enabled: raw=true, thinking=true, and streaming.
- // *
- // * Scenario: Calls generate with all possible parameters enabled. Usage: generate, raw=true,
- // * thinking enabled, streaming enabled.
- // */
- // @Test
- // @Order(24)
- // void shouldGenerateWithAllParametersEnabled() throws OllamaBaseException {
- // api.pullModel(THINKING_TOOL_MODEL);
- // // Setting raw here instructs the API to keep the response raw: even if the model
- // // generates 'thinking' tokens, they will not be received as separate tokens and will be
- // // mixed with 'response' tokens.
- // boolean raw = true;
- // OllamaGenerateRequest request =
- // OllamaGenerateRequestBuilder.builder()
- // .withModel(THINKING_TOOL_MODEL)
- // .withPrompt(
- // "Count 1 to 5. Just give me the numbers and do not give any
- // other"
- // + " details or information.")
- // .withRaw(raw)
- // .withThink(true)
- // .withOptions(new OptionsBuilder().setTemperature(0.1f).build())
- // .withFormat(null)
- // .withKeepAlive("0m")
- // .build();
- // OllamaGenerateStreamObserver handler =
- // new OllamaGenerateStreamObserver(
- // thinkingToken -> LOG.info("THINKING: {}", thinkingToken),
- // responseToken -> LOG.info("RESPONSE: {}", responseToken));
- // OllamaResult result = api.generate(request, handler);
- // assertNotNull(result);
- // assertNotNull(result.getResponse());
- // assertNotNull(result.getThinking());
- // }
- //
- // /**
- // * Tests generateWithFormat with complex nested JSON schema.
- // *
- // * Scenario: Uses a more complex JSON schema with nested objects and arrays. Usage:
- // * generateWithFormat with complex schema.
- // */
- // @Test
- // @Order(25)
- // void shouldGenerateWithComplexStructuredOutput() throws OllamaBaseException {
- // api.pullModel(TOOLS_MODEL);
- //
- // String prompt =
- // "Generate information about three major cities: their names, populations, and
- // top"
- // + " attractions.";
- //
- // Map
- // }
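The schema map this test built was lost in extraction; only the dangling `Map` above survives. As a hypothetical reconstruction only (not the original code), Ollama's structured outputs take a JSON-schema-shaped map, so the test plausibly built something along these lines:

```java
import java.util.HashMap;
import java.util.List;
import java.util.Map;

// Hypothetical reconstruction of the elided schema (an assumption, not the
// original test code): a JSON-schema-shaped map matching the prompt about
// three cities, their populations, and top attractions.
Map<String, Object> cityItem = new HashMap<>();
cityItem.put("type", "object");
cityItem.put(
        "properties",
        Map.of(
                "name", Map.of("type", "string"),
                "population", Map.of("type", "number"),
                "topAttractions", Map.of("type", "array", "items", Map.of("type", "string"))));
cityItem.put("required", List.of("name", "population", "topAttractions"));

Map<String, Object> format = new HashMap<>();
format.put("type", "object");
format.put("properties", Map.of("cities", Map.of("type", "array", "items", cityItem)));
format.put("required", List.of("cities"));
```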
- //
- // /**
- // * Tests chat with thinking enabled (no streaming).
- // *
- // * Scenario: Enables thinking in chat mode without streaming. Usage: chat, thinking
- // * enabled, no streaming, no tools.
- // */
- // @Test
- // @Order(26)
- // void shouldChatWithThinkingNoStream() throws OllamaBaseException {
- // api.pullModel(THINKING_TOOL_MODEL);
- // OllamaChatRequestBuilder builder =
- // OllamaChatRequestBuilder.builder().withModel(THINKING_TOOL_MODEL);
- // OllamaChatRequest requestModel =
- // builder.withMessage(
- // OllamaChatMessageRole.USER,
- // "What is the meaning of life? Think deeply about this.")
- // .withThinking(true)
- // .build();
- //
- // OllamaChatResult chatResult = api.chat(requestModel, null);
- //
- // assertNotNull(chatResult);
- // assertNotNull(chatResult.getResponseModel());
- // assertNotNull(chatResult.getResponseModel().getMessage());
- // assertNotNull(chatResult.getResponseModel().getMessage().getResponse());
- // // Note: Thinking content might be in the message or separate field depending on
- // // implementation
- // }
- //
- // /**
- // * Tests chat with custom options and streaming.
- // *
- // * Scenario: Combines custom options (temperature, top_p, etc.) with streaming. Usage: chat,
- // * custom options, streaming enabled, no tools, no thinking.
- // */
- // @Test
- // @Order(27)
- // void shouldChatWithCustomOptionsAndStreaming() throws OllamaBaseException {
- // api.pullModel(GENERAL_PURPOSE_MODEL);
- //
- // OllamaChatRequestBuilder builder =
- // OllamaChatRequestBuilder.builder().withModel(GENERAL_PURPOSE_MODEL);
- // OllamaChatRequest requestModel =
- // builder.withMessage(
- // OllamaChatMessageRole.USER,
- // "Tell me a creative story about a time traveler")
- // .withOptions(
- // new OptionsBuilder()
- // .setTemperature(0.9f)
- // .setTopP(0.9f)
- // .setTopK(40)
- // .build())
- // .build();
- //
- // OllamaChatResult chatResult = api.chat(requestModel, new ConsoleOutputChatTokenHandler());
- //
- // assertNotNull(chatResult);
- // assertNotNull(chatResult.getResponseModel());
- // assertNotNull(chatResult.getResponseModel().getMessage().getResponse());
- // assertFalse(chatResult.getResponseModel().getMessage().getResponse().isEmpty());
- // }
- //
- // /**
- // * Tests chat with tools, thinking, and streaming all enabled.
- // *
- // * Scenario: The most complex chat scenario with all features enabled. Usage: chat, tools,
- // * thinking enabled, streaming enabled.
- // */
- // @Test
- // @Order(28)
- // void shouldChatWithToolsThinkingAndStreaming() throws OllamaBaseException {
- // api.pullModel(THINKING_TOOL_MODEL_2);
- //
- // api.registerTool(employeeFinderTool());
- //
- // OllamaChatRequestBuilder builder =
- // OllamaChatRequestBuilder.builder().withModel(THINKING_TOOL_MODEL_2);
- // OllamaChatRequest requestModel =
- // builder.withMessage(
- // OllamaChatMessageRole.USER,
- // "I need to find information about employee John Smith. Think"
- // + " carefully about what details to retrieve.")
- // .withThinking(true)
- // .withOptions(new OptionsBuilder().setTemperature(0.1f).build())
- // .build();
- // requestModel.setUseTools(false);
- // OllamaChatResult chatResult = api.chat(requestModel, new ConsoleOutputChatTokenHandler());
- //
- // assertNotNull(chatResult);
- // assertNotNull(chatResult.getResponseModel());
- // // Verify that either tools were called or a response was generated
- // assertTrue(chatResult.getChatHistory().size() >= 2);
- // }
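The `employeeFinderTool()` registered above is never shown in this diff, and its shape depends on the library's tool-specification API. Purely as an illustration of the kind of data such a tool must describe (hypothetical types, not the library's actual API):

```java
import java.util.List;

// Illustrative sketch only: hypothetical records standing in for whatever
// tool-specification type employeeFinderTool() actually returns.
record ToolParameter(String name, String type, boolean required) {}
record ToolSketch(String name, String description, List<ToolParameter> parameters) {}

ToolSketch employeeFinder =
        new ToolSketch(
                "employee-finder",
                "Looks up details about an employee by name",
                List.of(new ToolParameter("employeeName", "string", true)));
```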
- //
- // // /**
- // // * Tests generateWithImages with multiple image URLs.
- // // *
- // // * Scenario: Sends multiple image URLs to the vision model. Usage: generateWithImages,
- // // * multiple image URLs, no streaming.
- // // */
- // // @Test
- // // @Order(29)
- // // void shouldGenerateWithMultipleImageURLs() throws OllamaBaseException {
- // // api.pullModel(VISION_MODEL);
- // //
- // // List