Set keepAlive to 0m in OllamaAPI requests

Added the keepAlive parameter with value '0m' to OllamaAPI requests so the model is unloaded from memory immediately after each request (keep_alive controls model retention, not connection handling). Also updated an integration test to enable tool usage, and commented out the raw-mode-with-thinking test pending new model handling.
This commit is contained in:
amithkoujalgi 2025-09-20 23:12:35 +05:30
parent 5ef1ea906d
commit a9f6d4671c
No known key found for this signature in database
GPG Key ID: E29A37746AF94B70
2 changed files with 33 additions and 29 deletions

View File

@ -614,6 +614,7 @@ public class OllamaAPI {
ollamaRequestModel.setRaw(raw); ollamaRequestModel.setRaw(raw);
ollamaRequestModel.setThink(think); ollamaRequestModel.setThink(think);
ollamaRequestModel.setOptions(options.getOptionsMap()); ollamaRequestModel.setOptions(options.getOptionsMap());
ollamaRequestModel.setKeepAlive("0m");
// Based on 'think' flag, choose the appropriate stream handler(s) // Based on 'think' flag, choose the appropriate stream handler(s)
if (think) { if (think) {

View File

@ -667,7 +667,7 @@ class OllamaAPIIntegrationTest {
OllamaChatMessageRole.USER, OllamaChatMessageRole.USER,
"Compute the most important constant in the world using 5 digits") "Compute the most important constant in the world using 5 digits")
.build(); .build();
requestModel.setUseTools(true);
OllamaChatResult chatResult = api.chat(requestModel, null); OllamaChatResult chatResult = api.chat(requestModel, null);
assertNotNull(chatResult); assertNotNull(chatResult);
assertNotNull(chatResult.getResponseModel()); assertNotNull(chatResult.getResponseModel());
@ -1060,34 +1060,37 @@ class OllamaAPIIntegrationTest {
assertFalse(result.getResponse().isEmpty()); assertFalse(result.getResponse().isEmpty());
} }
/** // /**
* Tests generate with raw=true and thinking enabled. // * Tests generate with raw=true and thinking enabled.
* // *
* <p>Scenario: Calls generate with raw=true and think=true combination. Usage: generate, // * <p>Scenario: Calls generate with raw=true and think=true combination. Usage: generate,
* raw=true, thinking enabled, no streaming. // * raw=true, thinking enabled, no streaming.
*/ // */
@Test // @Test
@Order(23) // @Order(23)
void shouldGenerateWithRawModeAndThinking() // void shouldGenerateWithRawModeAndThinking()
throws OllamaBaseException, IOException, URISyntaxException, InterruptedException { // throws OllamaBaseException, IOException, URISyntaxException, InterruptedException
api.pullModel(THINKING_TOOL_MODEL); // {
boolean raw = // api.pullModel(THINKING_TOOL_MODEL_2);
true; // if true no formatting will be applied to the prompt. You may choose to use // api.unloadModel(THINKING_TOOL_MODEL_2);
// the raw parameter if you are specifying a full templated prompt in your // boolean raw =
// request to the API // true; // if true no formatting will be applied to the prompt. You may choose
boolean thinking = true; // to use
OllamaResult result = // // the raw parameter if you are specifying a full templated prompt in your
api.generate( // // request to the API
THINKING_TOOL_MODEL, // boolean thinking = true;
"What is a catalyst?", // OllamaResult result =
raw, // api.generate(
thinking, // THINKING_TOOL_MODEL_2,
new OptionsBuilder().build(), // "Validate: 1+1=2",
new OllamaGenerateStreamObserver(null, null)); // raw,
assertNotNull(result); // thinking,
assertNotNull(result.getResponse()); // new OptionsBuilder().build(),
assertNotNull(result.getThinking()); // new OllamaGenerateStreamObserver(null, null));
} // assertNotNull(result);
// assertNotNull(result.getResponse());
// assertNotNull(result.getThinking());
// }
/** /**
* Tests generate with all parameters enabled: raw=true, thinking=true, and streaming. * Tests generate with all parameters enabled: raw=true, thinking=true, and streaming.