xsh-assistant-next/composables/useLLM.ts

import {type ChatMessage, llmModels, type LLMSpark, type ModelTag} from "~/typings/llm";
import {useFetchWrapped} from "~/composables/useFetchWrapped";
// useLoginState, AuthedRequest and BaseResponse carry no explicit imports here;
// they are presumably resolved via Nuxt auto-imports or global type declarations.

export interface LLMRequestOptions {
    modelTag: ModelTag
}

// Sends the chat context to the endpoint of the requested model and resolves with
// the model's reply text. Rejects with an error message string when the model tag
// is unknown, the request fails, or the response is malformed.
export const useLLM = (context: ChatMessage[], options: LLMRequestOptions): Promise<string> => new Promise((resolve, reject) => {
    const {modelTag} = options
    const model = llmModels.find(model => model.tag === modelTag)
    if (!model) return reject('model specified is not available')

    const loginState = useLoginState()
    useFetchWrapped<LLMSpark.request | AuthedRequest, BaseResponse<LLMSpark.response>>(
        model.endpoint,
        {
            token: loginState.token || '',
            user_id: loginState.user.id,
            // Drop empty and interrupted messages, keep only role and content,
            // and serialize the remaining conversation as a JSON string.
            prompt: JSON.stringify(context.filter(c => c.content && !c.interrupted).map(c => ({
                role: c.role,
                content: c.content
            })))
        }
    ).then(res => {
        if (res.ret !== 200) return reject(res.msg || 'unknown error')
        if (res.data.request_msg) return resolve(res.data.request_msg)
        if (res.data.request_fail) return reject(res.data.request_fail?.header?.message || 'unknown error')
        return reject('unknown error')
    }).catch(err => reject(err))
})
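
Below is a minimal sketch of how a component or another composable might consume useLLM, assuming the caller already holds a ChatMessage[] history and a valid ModelTag from ~/typings/llm; the askAssistant helper name is hypothetical.

import {useLLM} from "~/composables/useLLM";
import type {ChatMessage, ModelTag} from "~/typings/llm";

// Hypothetical caller: forwards the chat history to the selected model and
// returns the reply text, or null if the request was rejected.
const askAssistant = async (history: ChatMessage[], modelTag: ModelTag) => {
    try {
        return await useLLM(history, {modelTag})
    } catch (err) {
        console.error('LLM request failed:', err)
        return null
    }
}

Because useLLM rejects with plain message strings rather than Error objects, callers should treat the caught value as display-ready text.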