
/**
* Shared LLM client for OpenClaw Skills.
* Calls OneAPI (or any OpenAI-compatible) gateway with a specific model.
* Requires: LLM_BASE_URL, LLM_API_KEY in environment.
*/
const LOG_PREFIX = '[LLM_Client]';
/**
* Build chat completions URL from base. Avoids double /v1 when user sets
* LLM_BASE_URL to e.g. http://100.x:3000/v1.
* @param {string} baseUrl - LLM_BASE_URL (may end with / or /v1)
* @returns {string} full URL for POST
*/
/**
 * Resolve the chat-completions endpoint for a gateway base URL.
 * Tolerates trailing slashes and a base that already ends in /v1,
 * so "http://host:3000" and "http://host:3000/v1/" map to the same URL.
 * @param {string} baseUrl - LLM_BASE_URL (may end with / or /v1)
 * @returns {string} full URL for POST, or '' when baseUrl is empty
 */
function buildChatCompletionsUrl(baseUrl) {
  const trimmed = (baseUrl || '').replace(/\/+$/, '');
  if (trimmed === '') return '';
  const root = trimmed.endsWith('/v1') ? trimmed : `${trimmed}/v1`;
  return `${root}/chat/completions`;
}
/**
* Parse OneAPI/OpenAI error body for message.
* @param {string} text - response text
* @returns {string} error message for display
*/
/**
 * Extract a human-readable message from a OneAPI/OpenAI error body.
 * Checks, in order: error.message, a string-valued error, a top-level
 * message; anything unparseable falls back to the raw text.
 * @param {string} text - response text
 * @returns {string} error message for display
 */
function parseErrorBody(text) {
  if (!text || typeof text !== 'string') return String(text || 'Unknown error');
  try {
    // Property access stays inside the try: JSON.parse may yield null or
    // a primitive, and reading .error off those must fall through to text.
    const parsed = JSON.parse(text);
    const inner = parsed.error;
    if (inner && typeof inner === 'object' && typeof inner.message === 'string') return inner.message;
    if (inner && typeof inner === 'string') return inner;
    if (typeof parsed.message === 'string') return parsed.message;
  } catch {
    // Not JSON — fall through to the raw text.
  }
  return text;
}
/**
* Call a specific model via the configured LLM gateway (OpenAI Chat Completions).
* @param {string} modelName - Model id (e.g. "qwen3.5-plus", "claude-3-sonnet")
* @param {Array<{role: string, content: string}>} messages - Chat messages
* @param {object} options - Optional: temperature, max_tokens, stream, timeoutMs (client-only, default 60000)
* @returns {Promise<object>} Parsed JSON response (e.g. choices, usage)
* @throws {Error} On HTTP error or timeout; message/cause include OneAPI error details
*/
/**
 * Call a specific model via the configured LLM gateway (OpenAI Chat Completions).
 * @param {string} modelName - Model id (e.g. "qwen3.5-plus", "claude-3-sonnet")
 * @param {Array<{role: string, content: string}>} messages - Chat messages
 * @param {object} [options] - Optional: temperature, max_tokens, stream, timeoutMs (client-only, default 60000)
 * @returns {Promise<object>} Parsed JSON response (e.g. choices, usage)
 * @throws {Error} code LLM_CLIENT_CONFIG on bad arguments or missing env;
 *   code ETIMEDOUT on timeout; otherwise an Error whose message/status/body
 *   carry the gateway error details
 */
async function callSpecificModel(modelName, messages, options = {}) {
  // Fail fast on caller mistakes before touching env or the network.
  if (typeof modelName !== 'string' || modelName.trim() === '') {
    const err = new Error('modelName must be a non-empty string');
    err.code = 'LLM_CLIENT_CONFIG';
    throw err;
  }
  if (!Array.isArray(messages)) {
    const err = new Error('messages must be an array');
    err.code = 'LLM_CLIENT_CONFIG';
    throw err;
  }
  if (options.stream === true) {
    const err = new Error('Stream mode is not supported by this client; use stream: false or omit');
    err.code = 'LLM_CLIENT_CONFIG';
    throw err;
  }
  // Validate timeoutMs: previously Number('soon') -> NaN reached
  // setTimeout(fn, NaN), which fires immediately and aborts every request.
  const timeoutMs = options.timeoutMs != null ? Number(options.timeoutMs) : 60000;
  if (!Number.isFinite(timeoutMs) || timeoutMs <= 0) {
    const err = new Error('timeoutMs must be a positive finite number');
    err.code = 'LLM_CLIENT_CONFIG';
    throw err;
  }
  const baseUrl = (process.env.LLM_BASE_URL || '').trim();
  const apiKey = (process.env.LLM_API_KEY || '').trim();
  if (!baseUrl || !apiKey) {
    const err = new Error('LLM_BASE_URL and LLM_API_KEY must be set in environment');
    err.code = 'LLM_CLIENT_CONFIG';
    throw err;
  }
  const url = buildChatCompletionsUrl(baseUrl);
  if (!url) {
    const err = new Error('Invalid LLM_BASE_URL');
    err.code = 'LLM_CLIENT_CONFIG';
    throw err;
  }
  // timeoutMs is transport-level only; every other option is forwarded
  // verbatim into the request body.
  const { timeoutMs: _drop, ...bodyOptions } = options;
  const body = {
    model: modelName,
    messages,
    ...bodyOptions,
  };
  const controller = new AbortController();
  const timeoutId = setTimeout(() => controller.abort(), timeoutMs);
  try {
    const response = await fetch(url, {
      method: 'POST',
      headers: {
        'Content-Type': 'application/json',
        Authorization: `Bearer ${apiKey}`,
      },
      body: JSON.stringify(body),
      signal: controller.signal,
    });
    if (!response.ok) {
      const rawText = await response.text();
      const errorSummary = parseErrorBody(rawText);
      const err = new Error(`LLM gateway error (${response.status}): ${errorSummary}`);
      err.status = response.status;
      err.body = rawText;
      err.cause = { message: errorSummary, status: response.status };
      console.error(`${LOG_PREFIX} Error calling ${modelName}: ${errorSummary}`);
      throw err;
    }
    return await response.json();
  } catch (err) {
    // Translate the abort raised by our own timer into a clearer error.
    if (err.name === 'AbortError') {
      const timeoutErr = new Error(`LLM request timed out after ${timeoutMs}ms`);
      timeoutErr.code = 'ETIMEDOUT';
      timeoutErr.cause = err;
      console.error(`${LOG_PREFIX} Error calling ${modelName}: timeout (${timeoutMs}ms)`);
      throw timeoutErr;
    }
    throw err;
  } finally {
    // Single cleanup point — clearTimeout was previously duplicated in both
    // the success and error paths.
    clearTimeout(timeoutId);
  }
}
// Public surface of this module.
module.exports = { callSpecificModel, buildChatCompletionsUrl };