mirror of
https://github.com/HeyPuter/puter.git
synced 2026-05-04 00:20:45 +00:00
fix: temperature and max_tokens (interface + OpenAI)
This commit is contained in:
@@ -80,6 +80,8 @@ class AIInterfaceService extends BaseService {
                     stream: { type: 'flag' },
                     response: { type: 'json' },
                     model: { type: 'string' },
+                    temperature: { type: 'number' },
+                    max_tokens: { type: 'number' },
                 },
                 result: { type: 'json' },
             }

@@ -216,7 +216,10 @@ class OpenAICompletionService extends BaseService {
      * @returns {Promise<Object>} The completion response containing message and usage info
      * @throws {Error} If messages are invalid or content is flagged by moderation
      */
-    async complete (messages, { stream, moderation, model, tools }) {
+    async complete (messages, {
+        stream, moderation, model, tools,
+        temperature, max_tokens,
+    }) {
         // Validate messages
         if ( ! Array.isArray(messages) ) {
             throw new Error('`messages` must be an array');

@@ -254,7 +257,8 @@ class OpenAICompletionService extends BaseService {
             messages: messages,
             model: model,
             ...(tools ? { tools } : {}),
-            // max_tokens,
+            ...(max_tokens ? { max_tokens } : {}),
+            ...(temperature ? { temperature } : {}),
             stream,
             ...(stream ? {
                 stream_options: { include_usage: true },
Reference in New Issue
Block a user