diff --git a/src/backend/src/modules/puterai/AIInterfaceService.js b/src/backend/src/modules/puterai/AIInterfaceService.js
index 1037af5ce..eadddc0b3 100644
--- a/src/backend/src/modules/puterai/AIInterfaceService.js
+++ b/src/backend/src/modules/puterai/AIInterfaceService.js
@@ -80,6 +80,8 @@ class AIInterfaceService extends BaseService {
                         stream: { type: 'flag' },
                         response: { type: 'json' },
                         model: { type: 'string' },
+                        temperature: { type: 'number' },
+                        max_tokens: { type: 'number' },
                     },
                     result: { type: 'json' },
                 }
diff --git a/src/backend/src/modules/puterai/OpenAICompletionService.js b/src/backend/src/modules/puterai/OpenAICompletionService.js
index 773dd38c5..c32d9df7b 100644
--- a/src/backend/src/modules/puterai/OpenAICompletionService.js
+++ b/src/backend/src/modules/puterai/OpenAICompletionService.js
@@ -216,7 +216,10 @@ class OpenAICompletionService extends BaseService {
      * @returns {Promise} The completion response containing message and usage info
      * @throws {Error} If messages are invalid or content is flagged by moderation
      */
-    async complete (messages, { stream, moderation, model, tools }) {
+    async complete (messages, {
+        stream, moderation, model, tools,
+        temperature, max_tokens,
+    }) {
         // Validate messages
         if ( ! Array.isArray(messages) ) {
             throw new Error('`messages` must be an array');
@@ -254,7 +257,8 @@ class OpenAICompletionService extends BaseService {
             messages: messages,
             model: model,
             ...(tools ? { tools } : {}),
-            // max_tokens,
+            ...(max_tokens ? { max_tokens } : {}),
+            ...(temperature ? { temperature } : {}),
             stream,
             ...(stream ? {
                 stream_options: { include_usage: true },
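
For illustration, a minimal sketch of how a caller might exercise the two new options against the patched `complete()` method. The service lookup name and model id below are hypothetical; the actual registration name in Puter's service container may differ.

```js
// Hypothetical lookup; the real service name is an assumption.
const svc = services.get('openai-completion');

const result = await svc.complete(
    [{ role: 'user', content: 'Write a haiku about diffs.' }],
    {
        model: 'gpt-4o-mini',  // example model id
        temperature: 0.2,      // forwarded via ...(temperature ? { temperature } : {})
        max_tokens: 256,       // forwarded via ...(max_tokens ? { max_tokens } : {})
        stream: false,
        moderation: true,
    },
);
```

One caveat of the truthy spread guards: a caller passing `temperature: 0` (a valid value for near-deterministic output) would have it silently dropped, since `0` is falsy; a `temperature !== undefined` check would preserve it.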