Compare commits

..

10 Commits

| Author | SHA1 | Message | Date |
|--------|------|---------|------|
| amithkoujalgi | b43c9b8d93 | [maven-release-plugin] prepare release v1.0.42 | 2024-01-02 18:10:38 +00:00 |
| Amith Koujalgi | 935964c9b0 | Merge remote-tracking branch 'origin/main' | 2024-01-02 23:39:29 +05:30 |
| Amith Koujalgi | 9aed9a5237 | added OptionsBuilder and support for specifying extra params for ask API | 2024-01-02 23:39:18 +05:30 |
| amithkoujalgi | 6c082c94c4 | [maven-release-plugin] prepare for next development iteration | 2024-01-02 17:49:26 +00:00 |
| amithkoujalgi | 6c93b8304a | [maven-release-plugin] prepare release v1.0.41 | 2024-01-02 17:49:24 +00:00 |
| Amith Koujalgi | 85acf0fe78 | added OptionsBuilder and support for specifying extra params for ask API | 2024-01-02 23:18:17 +05:30 |
| amithkoujalgi | fe64c6dd10 | [maven-release-plugin] prepare for next development iteration | 2023-12-30 20:05:10 +00:00 |
| amithkoujalgi | b15066a204 | [maven-release-plugin] prepare release v1.0.40 | 2023-12-30 20:05:08 +00:00 |
| Amith Koujalgi | e2b29b6a07 | added Prompt Builder | 2023-12-31 01:33:59 +05:30 |
| amithkoujalgi | 7470ebe846 | [maven-release-plugin] prepare for next development iteration | 2023-12-30 17:42:18 +00:00 |
15 changed files with 498 additions and 20 deletions

View File

@@ -16,4 +16,10 @@ build-docs:
npm i --prefix docs && npm run build --prefix docs
start-docs:
npm i --prefix docs && npm run start --prefix docs
start-cpu:
docker run -it -v ~/ollama:/root/.ollama -p 11434:11434 ollama/ollama
start-gpu:
docker run -it --gpus=all -v ~/ollama:/root/.ollama -p 11434:11434 ollama/ollama

View File

@@ -126,13 +126,14 @@ Actions CI workflow.
- [ ] Async APIs for images
- [ ] Add custom headers to requests
- [ ] Add additional params for `ask` APIs such as:
- `options`: additional model parameters for the Modelfile such as `temperature`
- `system`: system prompt (overrides what is defined in the Modelfile)
- `template`: the full prompt or prompt template (overrides what is defined in the Modelfile)
- `context`: the context parameter returned from a previous request, which can be used to keep a
- [x] `options`: additional model parameters for the Modelfile such as `temperature` -
Supported [params](https://github.com/jmorganca/ollama/blob/main/docs/modelfile.md#valid-parameters-and-values). See the sketch after this list.
- [ ] `system`: system prompt (overrides what is defined in the Modelfile)
- [ ] `template`: the full prompt or prompt template (overrides what is defined in the Modelfile)
- [ ] `context`: the context parameter returned from a previous request, which can be used to keep a short conversational memory
- `stream`: Add support for streaming responses from the model
- [ ] `stream`: Add support for streaming responses from the model
- [ ] Add test cases
- [ ] Handle exceptions better (maybe throw more appropriate exceptions)
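The `options` item above is now supported end to end; below is a minimal sketch of passing extra model parameters through `ask` (the host URL and parameter values are placeholders):
```java
import io.github.amithkoujalgi.ollama4j.core.OllamaAPI;
import io.github.amithkoujalgi.ollama4j.core.models.OllamaResult;
import io.github.amithkoujalgi.ollama4j.core.types.OllamaModelType;
import io.github.amithkoujalgi.ollama4j.core.utils.OptionsBuilder;

public class AskWithOptions {
    public static void main(String[] args) throws Exception {
        OllamaAPI ollamaAPI = new OllamaAPI("http://localhost:11434/"); // placeholder host
        // Extra model parameters travel in the Options map built here.
        OllamaResult result =
                ollamaAPI.ask(
                        OllamaModelType.LLAMA2,
                        "Who are you?",
                        new OptionsBuilder().setTemperature(0.7f).build());
        System.out.println(result.getResponse());
    }
}
```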

View File

@@ -8,6 +8,11 @@ This API lets you ask questions to the LLMs in a synchronous way.
These APIs correlate to
the [completion](https://github.com/jmorganca/ollama/blob/main/docs/api.md#generate-a-completion) APIs.
Use the `OptionsBuilder` to build the `Options` object
with [extra parameters](https://github.com/jmorganca/ollama/blob/main/docs/modelfile.md#valid-parameters-and-values).
Refer to [this](/docs/apis-extras/options-builder).
## Try asking a question about the model.
```java
@@ -19,11 +24,13 @@ public class Main {
OllamaAPI ollamaAPI = new OllamaAPI(host);
OllamaResult result = ollamaAPI.ask(OllamaModelType.LLAMA2, "Who are you?");
OllamaResult result =
ollamaAPI.ask(OllamaModelType.LLAMA2, "Who are you?", new OptionsBuilder().build());
System.out.println(result.getResponse());
}
}
```
You will get a response similar to:
@@ -47,11 +54,13 @@ public class Main {
String prompt = "List all cricket world cup teams of 2019.";
OllamaResult result = ollamaAPI.ask(OllamaModelType.LLAMA2, prompt);
OllamaResult result =
ollamaAPI.ask(OllamaModelType.LLAMA2, prompt, new OptionsBuilder().build());
System.out.println(result.getResponse());
}
}
```
You'd then get a response from the model:
@@ -84,12 +93,15 @@ public class Main {
String host = "http://localhost:11434/";
OllamaAPI ollamaAPI = new OllamaAPI(host);
String prompt = SamplePrompts.getSampleDatabasePromptWithQuestion(
"List all customer names who have bought one or more products");
OllamaResult result = ollamaAPI.ask(OllamaModelType.SQLCODER, prompt);
String prompt =
SamplePrompts.getSampleDatabasePromptWithQuestion(
"List all customer names who have bought one or more products");
OllamaResult result =
ollamaAPI.ask(OllamaModelType.SQLCODER, prompt, new OptionsBuilder().build());
System.out.println(result.getResponse());
}
}
```
_Note: Here I've used

View File

@@ -1,5 +1,5 @@
---
sidebar_position: 5
sidebar_position: 6
---
# Generate Embeddings

View File

@@ -0,0 +1,73 @@
---
sidebar_position: 5
---
# Prompt Builder
This is designed for prompt engineering. It allows you to easily build prompt texts for zero-shot, one-shot, and
few-shot inference.
```java
import io.github.amithkoujalgi.ollama4j.core.OllamaAPI;
import io.github.amithkoujalgi.ollama4j.core.models.OllamaResult;
import io.github.amithkoujalgi.ollama4j.core.types.OllamaModelType;
import io.github.amithkoujalgi.ollama4j.core.utils.OptionsBuilder;
import io.github.amithkoujalgi.ollama4j.core.utils.PromptBuilder;
public class AskPhi {
public static void main(String[] args) throws Exception {
String host = "http://localhost:11434/";
OllamaAPI ollamaAPI = new OllamaAPI(host);
ollamaAPI.setRequestTimeoutSeconds(10);
String model = OllamaModelType.PHI;
PromptBuilder promptBuilder =
new PromptBuilder()
.addLine("You are an expert coder and understand different programming languages.")
.addLine("Given a question, answer ONLY with code.")
.addLine("Produce clean, formatted and indented code in markdown format.")
.addLine(
"DO NOT include ANY extra text apart from code. Follow this instruction very strictly!")
.addLine("If there's any additional information you want to add, use comments within code.")
.addLine("Answer only in the programming language that has been asked for.")
.addSeparator()
.addLine("Example: Sum 2 numbers in Python")
.addLine("Answer:")
.addLine("```python")
.addLine("def sum(num1: int, num2: int) -> int:")
.addLine(" return num1 + num2")
.addLine("```")
.addSeparator()
.add("How do I read a file in Go and print its contents to stdout?");
OllamaResult response =
ollamaAPI.ask(model, promptBuilder.build(), new OptionsBuilder().build());
System.out.println(response.getResponse());
}
}
```
You will get a response similar to:
```go
package main

import (
    "fmt"
    "io/ioutil"
    "os"
)

func main() {
    // Read the entire file into memory.
    data, err := ioutil.ReadFile("file.txt")
    if err != nil {
        fmt.Fprintln(os.Stderr, "Error reading file:", err)
        return
    }
    // Print the file contents to stdout.
    fmt.Println(string(data))
}
```

View File

@@ -0,0 +1,78 @@
---
sidebar_position: 1
---
# Options Builder
This lets you build options for the `ask()` API.
The following parameters are supported by Ollama:
| Parameter | Description | Value Type | Example Usage |
|----------------|---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|------------|----------------------|
| mirostat | Enable Mirostat sampling for controlling perplexity. (default: 0, 0 = disabled, 1 = Mirostat, 2 = Mirostat 2.0) | int | mirostat 0 |
| mirostat_eta | Influences how quickly the algorithm responds to feedback from the generated text. A lower learning rate will result in slower adjustments, while a higher learning rate will make the algorithm more responsive. (Default: 0.1) | float | mirostat_eta 0.1 |
| mirostat_tau | Controls the balance between coherence and diversity of the output. A lower value will result in more focused and coherent text. (Default: 5.0) | float | mirostat_tau 5.0 |
| num_ctx | Sets the size of the context window used to generate the next token. (Default: 2048) | int | num_ctx 4096 |
| num_gqa | The number of GQA groups in the transformer layer. Required for some models, for example it is 8 for llama2:70b | int | num_gqa 1 |
| num_gpu | The number of layers to send to the GPU(s). On macOS it defaults to 1 to enable metal support, 0 to disable. | int | num_gpu 50 |
| num_thread | Sets the number of threads to use during computation. By default, Ollama will detect this for optimal performance. It is recommended to set this value to the number of physical CPU cores your system has (as opposed to the logical number of cores). | int | num_thread 8 |
| repeat_last_n | Sets how far back the model looks to prevent repetition. (Default: 64, 0 = disabled, -1 = num_ctx) | int | repeat_last_n 64 |
| repeat_penalty | Sets how strongly to penalize repetitions. A higher value (e.g., 1.5) will penalize repetitions more strongly, while a lower value (e.g., 0.9) will be more lenient. (Default: 1.1) | float | repeat_penalty 1.1 |
| temperature | The temperature of the model. Increasing the temperature will make the model answer more creatively. (Default: 0.8) | float | temperature 0.7 |
| seed | Sets the random number seed to use for generation. Setting this to a specific number will make the model generate the same text for the same prompt. (Default: 0) | int | seed 42 |
| stop | Sets the stop sequences to use. When this pattern is encountered the LLM will stop generating text and return. Multiple stop patterns may be set by specifying multiple separate `stop` parameters in a modelfile. | string | stop "AI assistant:" |
| tfs_z | Tail free sampling is used to reduce the impact of less probable tokens from the output. A higher value (e.g., 2.0) will reduce the impact more, while a value of 1.0 disables this setting. (default: 1) | float | tfs_z 1 |
| num_predict | Maximum number of tokens to predict when generating text. (Default: 128, -1 = infinite generation, -2 = fill context) | int | num_predict 42 |
| top_k | Reduces the probability of generating nonsense. A higher value (e.g. 100) will give more diverse answers, while a lower value (e.g. 10) will be more conservative. (Default: 40) | int | top_k 40 |
| top_p | Works together with top-k. A higher value (e.g., 0.95) will lead to more diverse text, while a lower value (e.g., 0.5) will generate more focused and conservative text. (Default: 0.9) | float | top_p 0.9 |
Link to [source](https://github.com/jmorganca/ollama/blob/main/docs/modelfile.md#valid-parameters-and-values).
Also, see how to set those Ollama parameters using the `OptionsBuilder`
from [javadoc](https://amithkoujalgi.github.io/ollama4j/apidocs/io/github/amithkoujalgi/ollama4j/core/utils/OptionsBuilder.html).
## Build an empty `Options` object
```java
import io.github.amithkoujalgi.ollama4j.core.OllamaAPI;
import io.github.amithkoujalgi.ollama4j.core.utils.Options;
import io.github.amithkoujalgi.ollama4j.core.utils.OptionsBuilder;
public class Main {
public static void main(String[] args) {
String host = "http://localhost:11434/";
OllamaAPI ollamaAPI = new OllamaAPI(host);
Options options = new OptionsBuilder().build();
}
}
```
## Build the `Options` object with values
```java
import io.github.amithkoujalgi.ollama4j.core.OllamaAPI;
import io.github.amithkoujalgi.ollama4j.core.utils.Options;
import io.github.amithkoujalgi.ollama4j.core.utils.OptionsBuilder;
public class Main {
public static void main(String[] args) {
String host = "http://localhost:11434/";
OllamaAPI ollamaAPI = new OllamaAPI(host);
Options options =
new OptionsBuilder()
.setMirostat(1)
.setMirostatEta(0.5f)
.setNumGpu(2)
.setTemperature(1.5f)
.build();
}
}
```
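Once built, the `Options` object is passed straight into the `ask` API; a minimal end-to-end sketch, with an arbitrary model and placeholder host:
```java
import io.github.amithkoujalgi.ollama4j.core.OllamaAPI;
import io.github.amithkoujalgi.ollama4j.core.models.OllamaResult;
import io.github.amithkoujalgi.ollama4j.core.types.OllamaModelType;
import io.github.amithkoujalgi.ollama4j.core.utils.Options;
import io.github.amithkoujalgi.ollama4j.core.utils.OptionsBuilder;

public class Main {
    public static void main(String[] args) throws Exception {
        OllamaAPI ollamaAPI = new OllamaAPI("http://localhost:11434/"); // placeholder host
        Options options =
                new OptionsBuilder().setTemperature(0.7f).setTopP(0.9f).build();
        // ask() attaches the options map to the underlying request.
        OllamaResult result =
                ollamaAPI.ask(OllamaModelType.LLAMA2, "Why is the sky blue?", options);
        System.out.println(result.getResponse());
    }
}
```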

View File

@@ -76,7 +76,7 @@ const config = {
type: 'docSidebar',
sidebarId: 'tutorialSidebar',
position: 'left',
label: 'Usage',
label: 'Docs',
},
{to: 'https://amithkoujalgi.github.io/ollama4j/apidocs/', label: 'Javadoc', position: 'left'},
{to: '/blog', label: 'Blog', position: 'left'},

View File

@@ -4,7 +4,7 @@
<groupId>io.github.amithkoujalgi</groupId>
<artifactId>ollama4j</artifactId>
<version>1.0.39</version>
<version>1.0.42</version>
<name>Ollama4j</name>
<description>Java library for interacting with Ollama API.</description>
@@ -39,7 +39,7 @@
<connection>scm:git:git@github.com:amithkoujalgi/ollama4j.git</connection>
<developerConnection>scm:git:https://github.com/amithkoujalgi/ollama4j.git</developerConnection>
<url>https://github.com/amithkoujalgi/ollama4j</url>
<tag>v1.0.39</tag>
<tag>v1.0.42</tag>
</scm>
<build>

View File

@@ -6,6 +6,7 @@ import io.github.amithkoujalgi.ollama4j.core.models.request.CustomModelFileConte
import io.github.amithkoujalgi.ollama4j.core.models.request.CustomModelFilePathRequest;
import io.github.amithkoujalgi.ollama4j.core.models.request.ModelEmbeddingsRequest;
import io.github.amithkoujalgi.ollama4j.core.models.request.ModelRequest;
import io.github.amithkoujalgi.ollama4j.core.utils.Options;
import io.github.amithkoujalgi.ollama4j.core.utils.Utils;
import java.io.BufferedReader;
import java.io.ByteArrayOutputStream;
@@ -332,11 +333,15 @@ public class OllamaAPI {
*
* @param model the ollama model to ask the question to
* @param prompt the prompt/question text
* @param options the Options object - <a
* href="https://github.com/jmorganca/ollama/blob/main/docs/modelfile.md#valid-parameters-and-values">More
* details on the options</a>
* @return OllamaResult that includes response text and time taken for response
*/
public OllamaResult ask(String model, String prompt)
public OllamaResult ask(String model, String prompt, Options options)
throws OllamaBaseException, IOException, InterruptedException {
OllamaRequestModel ollamaRequestModel = new OllamaRequestModel(model, prompt);
ollamaRequestModel.setOptions(options.getOptionsMap());
return askSync(ollamaRequestModel);
}

View File

@@ -1,10 +1,10 @@
package io.github.amithkoujalgi.ollama4j.core.models;
import static io.github.amithkoujalgi.ollama4j.core.utils.Utils.getObjectMapper;
import com.fasterxml.jackson.core.JsonProcessingException;
import java.util.List;
import java.util.Map;
import lombok.Data;
@Data
@@ -13,6 +13,7 @@ public class OllamaRequestModel {
private String model;
private String prompt;
private List<String> images;
private Map<String, Object> options;
public OllamaRequestModel(String model, String prompt) {
this.model = model;

View File

@@ -21,6 +21,7 @@ public class OllamaModelType {
public static final String VICUNA = "vicuna";
public static final String WIZARD_VICUNA_UNCENSORED = "wizard-vicuna-uncensored";
public static final String PHIND_CODELLAMA = "phind-codellama";
public static final String PHI = "phi";
public static final String ZEPHYR = "zephyr";
public static final String WIZARDCODER = "wizardcoder";
public static final String MISTRAL_OPENORCA = "mistral-openorca";

View File

@@ -0,0 +1,11 @@
package io.github.amithkoujalgi.ollama4j.core.utils;
import java.util.Map;
import lombok.Data;
/** Options for an Ollama model. */
@Data
public class Options {
private final Map<String, Object> optionsMap;
}
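Because `@Data` on a class whose only field is `final` makes Lombok generate an all-args constructor and a getter, the wrapper can also be populated by hand; a minimal sketch (parameter values are arbitrary):
```java
import io.github.amithkoujalgi.ollama4j.core.utils.Options;

import java.util.HashMap;
import java.util.Map;

public class OptionsDemo {
    public static void main(String[] args) {
        Map<String, Object> params = new HashMap<>();
        params.put("temperature", 0.7f);
        // Lombok's @Data generates Options(Map) and getOptionsMap().
        Options options = new Options(params);
        System.out.println(options.getOptionsMap()); // {temperature=0.7}
    }
}
```
In practice, `OptionsBuilder` (below) does exactly this internally: it seeds the map and exposes typed setters for each parameter.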

View File

@@ -0,0 +1,218 @@
package io.github.amithkoujalgi.ollama4j.core.utils;
import java.util.HashMap;
/** Builder class for creating options for an Ollama model. */
public class OptionsBuilder {
private final Options options;
/** Constructs a new OptionsBuilder with an empty options map. */
public OptionsBuilder() {
this.options = new Options(new HashMap<>());
}
/**
* Enable Mirostat sampling for controlling perplexity. (default: 0, 0 = disabled, 1 = Mirostat, 2
* = Mirostat 2.0)
*
* @param value The value for the "mirostat" parameter.
* @return The updated OptionsBuilder.
*/
public OptionsBuilder setMirostat(int value) {
options.getOptionsMap().put("mirostat", value);
return this;
}
/**
* Influences how quickly the algorithm responds to feedback from the generated text. A lower
* learning rate will result in slower adjustments, while a higher learning rate will make the
* algorithm more responsive. (Default: 0.1)
*
* @param value The value for the "mirostat_eta" parameter.
* @return The updated OptionsBuilder.
*/
public OptionsBuilder setMirostatEta(float value) {
options.getOptionsMap().put("mirostat_eta", value);
return this;
}
/**
* Controls the balance between coherence and diversity of the output. A lower value will result
* in more focused and coherent text. (Default: 5.0)
*
* @param value The value for the "mirostat_tau" parameter.
* @return The updated OptionsBuilder.
*/
public OptionsBuilder setMirostatTau(float value) {
options.getOptionsMap().put("mirostat_tau", value);
return this;
}
/**
* Sets the size of the context window used to generate the next token. (Default: 2048)
*
* @param value The value for the "num_ctx" parameter.
* @return The updated OptionsBuilder.
*/
public OptionsBuilder setNumCtx(int value) {
options.getOptionsMap().put("num_ctx", value);
return this;
}
/**
* The number of GQA groups in the transformer layer. Required for some models, for example, it is
* 8 for llama2:70b.
*
* @param value The value for the "num_gqa" parameter.
* @return The updated OptionsBuilder.
*/
public OptionsBuilder setNumGqa(int value) {
options.getOptionsMap().put("num_gqa", value);
return this;
}
/**
* The number of layers to send to the GPU(s). On macOS it defaults to 1 to enable metal support,
* 0 to disable.
*
* @param value The value for the "num_gpu" parameter.
* @return The updated OptionsBuilder.
*/
public OptionsBuilder setNumGpu(int value) {
options.getOptionsMap().put("num_gpu", value);
return this;
}
/**
* Sets the number of threads to use during computation. By default, Ollama will detect this for
* optimal performance. It is recommended to set this value to the number of physical CPU cores
* your system has (as opposed to the logical number of cores).
*
* @param value The value for the "num_thread" parameter.
* @return The updated OptionsBuilder.
*/
public OptionsBuilder setNumThread(int value) {
options.getOptionsMap().put("num_thread", value);
return this;
}
/**
* Sets how far back the model looks to prevent repetition. (Default: 64, 0 = disabled, -1 =
* num_ctx)
*
* @param value The value for the "repeat_last_n" parameter.
* @return The updated OptionsBuilder.
*/
public OptionsBuilder setRepeatLastN(int value) {
options.getOptionsMap().put("repeat_last_n", value);
return this;
}
/**
* Sets how strongly to penalize repetitions. A higher value (e.g., 1.5) will penalize repetitions
* more strongly, while a lower value (e.g., 0.9) will be more lenient. (Default: 1.1)
*
* @param value The value for the "repeat_penalty" parameter.
* @return The updated OptionsBuilder.
*/
public OptionsBuilder setRepeatPenalty(float value) {
options.getOptionsMap().put("repeat_penalty", value);
return this;
}
/**
* The temperature of the model. Increasing the temperature will make the model answer more
* creatively. (Default: 0.8)
*
* @param value The value for the "temperature" parameter.
* @return The updated OptionsBuilder.
*/
public OptionsBuilder setTemperature(float value) {
options.getOptionsMap().put("temperature", value);
return this;
}
/**
* Sets the random number seed to use for generation. Setting this to a specific number will make
* the model generate the same text for the same prompt. (Default: 0)
*
* @param value The value for the "seed" parameter.
* @return The updated OptionsBuilder.
*/
public OptionsBuilder setSeed(int value) {
options.getOptionsMap().put("seed", value);
return this;
}
/**
* Sets the stop sequences to use. When this pattern is encountered the LLM will stop generating
* text and return. Multiple stop patterns may be set by specifying multiple separate `stop`
* parameters in a modelfile.
*
* @param value The value for the "stop" parameter.
* @return The updated OptionsBuilder.
*/
public OptionsBuilder setStop(String value) {
options.getOptionsMap().put("stop", value);
return this;
}
/**
* Tail free sampling is used to reduce the impact of less probable tokens from the output. A
* higher value (e.g., 2.0) will reduce the impact more, while a value of 1.0 disables this
* setting. (default: 1)
*
* @param value The value for the "tfs_z" parameter.
* @return The updated OptionsBuilder.
*/
public OptionsBuilder setTfsZ(float value) {
options.getOptionsMap().put("tfs_z", value);
return this;
}
/**
* Maximum number of tokens to predict when generating text. (Default: 128, -1 = infinite
* generation, -2 = fill context)
*
* @param value The value for the "num_predict" parameter.
* @return The updated OptionsBuilder.
*/
public OptionsBuilder setNumPredict(int value) {
options.getOptionsMap().put("num_predict", value);
return this;
}
/**
* Reduces the probability of generating nonsense. A higher value (e.g. 100) will give more
* diverse answers, while a lower value (e.g. 10) will be more conservative. (Default: 40)
*
* @param value The value for the "top_k" parameter.
* @return The updated OptionsBuilder.
*/
public OptionsBuilder setTopK(int value) {
options.getOptionsMap().put("top_k", value);
return this;
}
/**
* Works together with top-k. A higher value (e.g., 0.95) will lead to more diverse text, while a
* lower value (e.g., 0.5) will generate more focused and conservative text. (Default: 0.9)
*
* @param value The value for the "top_p" parameter.
* @return The updated OptionsBuilder.
*/
public OptionsBuilder setTopP(float value) {
options.getOptionsMap().put("top_p", value);
return this;
}
/**
* Builds the {@code Options} object from the accumulated parameters.
*
* @return The populated {@code Options} object.
*/
public Options build() {
return options;
}
}

View File

@@ -0,0 +1,69 @@
package io.github.amithkoujalgi.ollama4j.core.utils;
/**
* The {@code PromptBuilder} class is used to construct prompt texts for language models (LLMs). It
* provides methods for adding text, adding lines, adding separators, and building the final prompt.
*
* <p>Example usage:
*
* <pre>{@code
* PromptBuilder promptBuilder = new PromptBuilder();
* promptBuilder.add("This is a sample prompt for language models.")
* .addLine("You can add lines to provide context.")
* .addSeparator()
* .add("Feel free to customize as needed.");
* String finalPrompt = promptBuilder.build();
* System.out.println(finalPrompt);
* }</pre>
*/
public class PromptBuilder {
private final StringBuilder prompt;
/** Constructs a new {@code PromptBuilder} with an empty prompt. */
public PromptBuilder() {
this.prompt = new StringBuilder();
}
/**
* Appends the specified text to the prompt.
*
* @param text the text to be added to the prompt
* @return a reference to this {@code PromptBuilder} instance for method chaining
*/
public PromptBuilder add(String text) {
prompt.append(text);
return this;
}
/**
* Appends the specified text followed by a newline character to the prompt.
*
* @param text the text to be added as a line to the prompt
* @return a reference to this {@code PromptBuilder} instance for method chaining
*/
public PromptBuilder addLine(String text) {
prompt.append(text).append("\n");
return this;
}
/**
* Appends a separator line to the prompt. The separator is a newline followed by a line of
* dashes.
*
* @return a reference to this {@code PromptBuilder} instance for method chaining
*/
public PromptBuilder addSeparator() {
prompt.append("\n--------------------------------------------------\n");
return this;
}
/**
* Builds and returns the final prompt as a string.
*
* @return the final prompt as a string
*/
public String build() {
return prompt.toString();
}
}
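To make the separator behavior concrete, here is a minimal sketch of what `build()` returns (expected output shown in comments):
```java
import io.github.amithkoujalgi.ollama4j.core.utils.PromptBuilder;

public class PromptBuilderDemo {
    public static void main(String[] args) {
        String prompt =
                new PromptBuilder()
                        .addLine("Answer only with code.")
                        .addSeparator()
                        .add("Sum 2 numbers in Python")
                        .build();
        // Prints:
        // Answer only with code.
        //
        // --------------------------------------------------
        // Sum 2 numbers in Python
        System.out.println(prompt);
    }
}
```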

View File

@@ -8,6 +8,7 @@ import io.github.amithkoujalgi.ollama4j.core.models.ModelDetail;
import io.github.amithkoujalgi.ollama4j.core.models.OllamaAsyncResultCallback;
import io.github.amithkoujalgi.ollama4j.core.models.OllamaResult;
import io.github.amithkoujalgi.ollama4j.core.types.OllamaModelType;
import io.github.amithkoujalgi.ollama4j.core.utils.OptionsBuilder;
import java.io.IOException;
import java.net.URISyntaxException;
import java.util.ArrayList;
@@ -100,10 +101,12 @@ class TestMockedAPIs {
OllamaAPI ollamaAPI = Mockito.mock(OllamaAPI.class);
String model = OllamaModelType.LLAMA2;
String prompt = "some prompt text";
OptionsBuilder optionsBuilder = new OptionsBuilder();
try {
when(ollamaAPI.ask(model, prompt)).thenReturn(new OllamaResult("", 0, 200));
ollamaAPI.ask(model, prompt);
verify(ollamaAPI, times(1)).ask(model, prompt);
when(ollamaAPI.ask(model, prompt, optionsBuilder.build()))
.thenReturn(new OllamaResult("", 0, 200));
ollamaAPI.ask(model, prompt, optionsBuilder.build());
verify(ollamaAPI, times(1)).ask(model, prompt, optionsBuilder.build());
} catch (IOException | OllamaBaseException | InterruptedException e) {
throw new RuntimeException(e);
}