Mirror of https://github.com/amithkoujalgi/ollama4j.git (synced 2025-11-04 02:20:50 +01:00)
	Set keepAlive to 0m in OllamaAPI requests
Added a keepAlive parameter with the value '0m' to OllamaAPI requests for improved connection handling. Updated the integration test to enable tool usage, and commented out a raw mode and thinking test that exercises new model handling.
@@ -614,6 +614,7 @@ public class OllamaAPI {
        ollamaRequestModel.setRaw(raw);
        ollamaRequestModel.setThink(think);
        ollamaRequestModel.setOptions(options.getOptionsMap());
        ollamaRequestModel.setKeepAlive("0m");

        // Based on 'think' flag, choose the appropriate stream handler(s)
        if (think) {
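For context, the keepAlive value set above maps to the keep_alive field of the Ollama REST API, which controls how long the model stays loaded in memory after a request; "0m" asks the server to unload the model as soon as the response is produced. Below is a minimal sketch of an equivalent raw request against the documented /api/generate endpoint; the host URL and model name are illustrative assumptions, not values taken from this commit.

import java.net.URI;
import java.net.http.HttpClient;
import java.net.http.HttpRequest;
import java.net.http.HttpResponse;

public class KeepAliveSketch {
    public static void main(String[] args) throws Exception {
        // keep_alive "0m" tells the Ollama server to unload the model
        // right after this request completes (model name below is assumed).
        String body = """
                {
                  "model": "llama3.2",
                  "prompt": "Why is the sky blue?",
                  "stream": false,
                  "keep_alive": "0m"
                }""";
        HttpRequest request = HttpRequest.newBuilder()
                .uri(URI.create("http://localhost:11434/api/generate"))
                .header("Content-Type", "application/json")
                .POST(HttpRequest.BodyPublishers.ofString(body))
                .build();
        HttpResponse<String> response = HttpClient.newHttpClient()
                .send(request, HttpResponse.BodyHandlers.ofString());
        System.out.println(response.body());
    }
}

Setting keep_alive to a duration such as "5m" instead keeps the model resident between calls, trading memory for lower latency on the next request.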
@@ -667,7 +667,7 @@ class OllamaAPIIntegrationTest {
                                OllamaChatMessageRole.USER,
                                "Compute the most important constant in the world using 5 digits")
                        .build();

        requestModel.setUseTools(true);
        OllamaChatResult chatResult = api.chat(requestModel, null);
        assertNotNull(chatResult);
        assertNotNull(chatResult.getResponseModel());
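The setUseTools(true) change above opts the chat request into tool calling. At the REST level this corresponds to sending a tools array to the /api/chat endpoint; the sketch below shows what such a payload can look like, with the model name and the weather function being purely illustrative assumptions rather than anything used by this test.

import java.net.URI;
import java.net.http.HttpClient;
import java.net.http.HttpRequest;
import java.net.http.HttpResponse;

public class ChatWithToolsSketch {
    public static void main(String[] args) throws Exception {
        // A chat request that advertises one callable function to the model.
        // The model may answer directly or respond with a tool_calls entry.
        String body = """
                {
                  "model": "llama3.1",
                  "stream": false,
                  "messages": [
                    {"role": "user", "content": "What is the weather in Paris?"}
                  ],
                  "tools": [
                    {
                      "type": "function",
                      "function": {
                        "name": "get_current_weather",
                        "description": "Get the current weather for a city",
                        "parameters": {
                          "type": "object",
                          "properties": {
                            "city": {"type": "string", "description": "City name"}
                          },
                          "required": ["city"]
                        }
                      }
                    }
                  ]
                }""";
        HttpRequest request = HttpRequest.newBuilder()
                .uri(URI.create("http://localhost:11434/api/chat"))
                .header("Content-Type", "application/json")
                .POST(HttpRequest.BodyPublishers.ofString(body))
                .build();
        HttpResponse<String> response = HttpClient.newHttpClient()
                .send(request, HttpResponse.BodyHandlers.ofString());
        System.out.println(response.body());
    }
}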
@@ -1060,34 +1060,37 @@ class OllamaAPIIntegrationTest {
        assertFalse(result.getResponse().isEmpty());
    }

    /**
     * Tests generate with raw=true and thinking enabled.
     *
     * <p>Scenario: Calls generate with raw=true and think=true combination. Usage: generate,
     * raw=true, thinking enabled, no streaming.
     */
    @Test
    @Order(23)
    void shouldGenerateWithRawModeAndThinking()
            throws OllamaBaseException, IOException, URISyntaxException, InterruptedException {
        api.pullModel(THINKING_TOOL_MODEL);
        boolean raw =
                true; // if true no formatting will be applied to the prompt. You may choose to use
        // the raw parameter if you are specifying a full templated prompt in your
        // request to the API
        boolean thinking = true;
        OllamaResult result =
                api.generate(
                        THINKING_TOOL_MODEL,
                        "What is a catalyst?",
                        raw,
                        thinking,
                        new OptionsBuilder().build(),
                        new OllamaGenerateStreamObserver(null, null));
        assertNotNull(result);
        assertNotNull(result.getResponse());
        assertNotNull(result.getThinking());
    }
    //    /**
    //     * Tests generate with raw=true and thinking enabled.
    //     *
    //     * <p>Scenario: Calls generate with raw=true and think=true combination. Usage: generate,
    //     * raw=true, thinking enabled, no streaming.
    //     */
    //    @Test
    //    @Order(23)
    //    void shouldGenerateWithRawModeAndThinking()
    //            throws OllamaBaseException, IOException, URISyntaxException, InterruptedException
    // {
    //        api.pullModel(THINKING_TOOL_MODEL_2);
    //        api.unloadModel(THINKING_TOOL_MODEL_2);
    //        boolean raw =
    //                true; // if true no formatting will be applied to the prompt. You may choose
    // to use
    //        // the raw parameter if you are specifying a full templated prompt in your
    //        // request to the API
    //        boolean thinking = true;
    //        OllamaResult result =
    //                api.generate(
    //                        THINKING_TOOL_MODEL_2,
    //                        "Validate: 1+1=2",
    //                        raw,
    //                        thinking,
    //                        new OptionsBuilder().build(),
    //                        new OllamaGenerateStreamObserver(null, null));
    //        assertNotNull(result);
    //        assertNotNull(result.getResponse());
    //        assertNotNull(result.getThinking());
    //    }

    /**
     * Tests generate with all parameters enabled: raw=true, thinking=true, and streaming.
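For readers unfamiliar with the two flags exercised by the test above: raw=true skips Ollama's prompt templating, so the caller is responsible for supplying a fully templated prompt, and think=true asks a reasoning-capable model to return its thinking separately from the final answer. A rough sketch of the equivalent request body for the documented /api/generate endpoint follows; the model name is an assumption (any thinking-capable model), not the one used by this test.

import java.net.URI;
import java.net.http.HttpClient;
import java.net.http.HttpRequest;
import java.net.http.HttpResponse;

public class RawThinkingSketch {
    public static void main(String[] args) throws Exception {
        // raw: true   -> the prompt is sent to the model verbatim, no template applied
        // think: true -> a reasoning-capable model returns its thinking alongside the answer
        String body = """
                {
                  "model": "qwen3",
                  "prompt": "What is a catalyst?",
                  "raw": true,
                  "think": true,
                  "stream": false
                }""";
        HttpRequest request = HttpRequest.newBuilder()
                .uri(URI.create("http://localhost:11434/api/generate"))
                .header("Content-Type", "application/json")
                .POST(HttpRequest.BodyPublishers.ofString(body))
                .build();
        HttpResponse<String> response = HttpClient.newHttpClient()
                .send(request, HttpResponse.BodyHandlers.ofString());
        System.out.println(response.body());
    }
}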