From 52ac0869d53b54e91db557f012f7ee9a3ecc3e9d Mon Sep 17 00:00:00 2001
From: Mohamed Bassem
Date: Sun, 20 Jul 2025 23:39:38 +0000
Subject: feat: Add a max output tokens env variable

---
 packages/shared/inference.ts | 4 +++-
 1 file changed, 3 insertions(+), 1 deletion(-)

(limited to 'packages/shared/inference.ts')

diff --git a/packages/shared/inference.ts b/packages/shared/inference.ts
index 04fa8cfc..41026fbd 100644
--- a/packages/shared/inference.ts
+++ b/packages/shared/inference.ts
@@ -90,6 +90,7 @@ class OpenAIInferenceClient implements InferenceClient {
       {
         messages: [{ role: "user", content: prompt }],
         model: serverConfig.inference.textModel,
+        max_tokens: serverConfig.inference.maxOutputTokens,
         response_format: mapInferenceOutputSchema(
           {
             structured: optsWithDefaults.schema
@@ -126,6 +127,7 @@ class OpenAIInferenceClient implements InferenceClient {
     const chatCompletion = await this.openAI.chat.completions.create(
       {
         model: serverConfig.inference.imageModel,
+        max_tokens: serverConfig.inference.maxOutputTokens,
         response_format: mapInferenceOutputSchema(
           {
             structured: optsWithDefaults.schema
@@ -151,7 +153,6 @@ class OpenAIInferenceClient implements InferenceClient {
             ],
           },
         ],
-        max_tokens: 2000,
       },
       {
         signal: optsWithDefaults.abortSignal,
       },
@@ -224,6 +225,7 @@ class OllamaInferenceClient implements InferenceClient {
       keep_alive: serverConfig.inference.ollamaKeepAlive,
       options: {
         num_ctx: serverConfig.inference.contextLength,
+        num_predict: serverConfig.inference.maxOutputTokens,
       },
       messages: [
         { role: "user", content: prompt, images: image ? [image] : undefined },
--
cgit v1.2.3-70-g09d2
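
This commit replaces the hardcoded max_tokens: 2000 with a value read from
server configuration, mapped onto each backend's own parameter: OpenAI-compatible
chat completions take max_tokens, while Ollama takes num_predict inside options.
The configuration side is not part of this diff; below is a minimal sketch of how
serverConfig.inference.maxOutputTokens might be populated from an environment
variable. The variable name INFERENCE_MAX_OUTPUT_TOKENS, the zod-based parsing,
and the default of 2000 are assumptions for illustration, not taken from this
patch.

    // config-sketch.ts -- hypothetical wiring of the new env variable.
    // Assumption: the server config is built by parsing process.env with zod;
    // all names here are illustrative, not confirmed by the patch above.
    import { z } from "zod";

    const envSchema = z.object({
      // Caps completion length; forwarded as max_tokens to OpenAI-compatible
      // APIs and as num_predict to Ollama. The default mirrors the previously
      // hardcoded 2000 (an assumption, not stated by the patch).
      INFERENCE_MAX_OUTPUT_TOKENS: z.coerce
        .number()
        .int()
        .positive()
        .default(2000),
    });

    const env = envSchema.parse(process.env);

    export const serverConfig = {
      inference: {
        maxOutputTokens: env.INFERENCE_MAX_OUTPUT_TOKENS,
        // ...textModel, imageModel, contextLength, ollamaKeepAlive, etc.
      },
    };

Under these assumptions, setting INFERENCE_MAX_OUTPUT_TOKENS=4096 in the
server's environment raises the output cap for both clients, and leaving it
unset keeps the previous 2000-token behavior.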