// Source: xsh-assistant-next/typings/llm.ts (92 lines, 1.7 KiB, TypeScript)
/** Unique identifier of a chat session (see `ChatSession.id`). */
export type ChatSessionId = string
/** Unique identifier of a single chat message (see `ChatMessage.id`). */
export type ChatMessageId = string
/** Tags of the supported Spark model versions; one per entry in `llmModels`. */
export type ModelTag =
'spark1_5' |
'spark3_0' |
'spark3_5'
/**
 * A selectable LLM model entry (display metadata plus backend endpoint).
 * NOTE(review): name reads like a typo for "LLMModel", but renaming would
 * break existing importers — left as-is.
 */
export interface LLMModal {
/** Stable identifier used to select this model. */
tag: ModelTag
/** Human-readable display name, e.g. "Spark 3.5". */
name: string
/** Short description shown to the user (Chinese in the built-in list). */
description: string
/** Optional icon identifier (Tabler icon ids in the built-in list). */
icon?: string
/** Backend API endpoint, e.g. "App.Assistant_Spark.Chat_3_5". */
endpoint: string
}
/**
 * An assistant template record.
 * NOTE(review): snake_case fields suggest this mirrors a server-side API
 * payload — confirm against the backend schema before changing anything.
 */
export interface Assistant {
/** Record id. */
id: number;
/** Id of the owning user. */
user_id: number;
/** Creation timestamp — presumably epoch-based; confirm units (s vs ms). */
create_time: number;
/** Template name. */
tpl_name: string;
/** Category/type of the template — exact semantics not visible here; verify with producer. */
types: string;
/** Description text — presumably "des" abbreviates "description". */
des: string;
/** Input template text. */
input_tpl: string;
/** Role text used when composing prompts — TODO confirm against consumer. */
role: string;
/** Target/goal text — TODO confirm against consumer. */
target: string;
/** Style text — TODO confirm against consumer. */
style: string;
/** Demand/requirement text — TODO confirm against consumer. */
demand: string;
}
/**
 * Request/response shapes for the Spark chat endpoints
 * (the "App.Assistant_Spark.Chat_*" endpoints listed in `llmModels`).
 * NOTE(review): the lowercase interface names break PascalCase convention,
 * but renaming would break `LLMSpark.request` / `LLMSpark.response` users.
 */
export namespace LLMSpark {
/** Payload sent to a chat endpoint. */
export interface request {
/** User prompt text. */
prompt: string
}
/** Payload received from a chat endpoint. */
export interface response {
/** Message text — despite the name, presumably the reply; TODO confirm. */
request_msg?: string
/** Present on failure; wraps the upstream error envelope. */
request_fail?: {
/** Error header — looks like the Spark error envelope (code/message/sid/status); confirm with API docs. */
header?: {
code: number
message: string
sid: string
status: number
}
}
}
}
/**
 * Built-in catalogue of selectable Spark models.
 *
 * `as const satisfies readonly LLMModal[]` (TS 4.9+) validates every entry
 * against `LLMModal` without widening, so literal types are preserved —
 * e.g. `llmModels[0].tag` is typed `'spark1_5'` rather than `ModelTag`.
 * (The previous `Readonly<LLMModal[]>` annotation discarded those literals.)
 * `Object.freeze` additionally blocks mutation at runtime (shallow freeze).
 */
export const llmModels = Object.freeze([
  {
    tag: 'spark1_5',
    name: 'Spark 1.5',
    description: '科大讯飞星火 1.5',
    icon: 'tabler:car',
    endpoint: 'App.Assistant_Spark.Chat_1_5'
  },
  {
    tag: 'spark3_0',
    name: 'Spark 3.0',
    description: '科大讯飞星火 3.0',
    icon: 'tabler:plane-departure',
    endpoint: 'App.Assistant_Spark.Chat_3_0'
  },
  {
    tag: 'spark3_5',
    name: 'Spark 3.5',
    description: '科大讯飞星火 3.5',
    icon: 'tabler:rocket',
    endpoint: 'App.Assistant_Spark.Chat_3_5'
  },
] as const satisfies readonly LLMModal[])
/** One chat conversation together with its message history. */
export interface ChatSession {
/** Session id. */
id: ChatSessionId
/** Session title/subject. */
subject: string
/** Creation timestamp — presumably epoch-based; confirm units (s vs ms). */
create_at: number
/** Ordered messages belonging to this session. */
messages: ChatMessage[]
/** Draft input not yet sent, if any — TODO confirm against the UI code. */
last_input?: string
/** Assistant template attached to this session, if any. */
assistant?: Assistant
}
/** Author of a chat message. */
export type MessageRole = 'user' | 'assistant' | 'system'
/** A single message inside a `ChatSession`. */
export interface ChatMessage {
/** Message id. */
id: ChatMessageId
/** Who authored this message. */
role: MessageRole
/** Message text. */
content: string
/** Presumably marks a template-seeded message rather than a typed one — TODO confirm. */
preset?: boolean
/** Creation timestamp, when recorded — confirm units (s vs ms). */
create_at?: number
/** Presumably set when generation was cut off before completion — TODO confirm. */
interrupted?: boolean
}