yl-frontend/src/api/assistant.ts

// Simulated network delay (ms)
const delay = (ms: number) => new Promise(resolve => setTimeout(resolve, ms))
// API type definitions
export interface VoiceCommand {
  id: number
  phrase: string
  action: string
  type: string
  enabled: boolean
  createTime: string
}

export interface VoiceReminder {
  id: number
  title: string
  content: string
  time: string
  repeat: string[]
  voiceEnabled: boolean
  status: 'pending' | 'done'
}

export interface VoiceMessage {
  id: number
  role: 'user' | 'assistant'
  content: string
  type: 'text' | 'voice'
  time: string
  audioUrl?: string
}

// Mock data
const mockCommands: VoiceCommand[] = [
  {
    id: 1,
    phrase: '打开电视',
    action: 'tv_on',
    type: 'device',
    enabled: true,
    createTime: '2024-03-20 10:00:00'
  },
  {
    id: 2,
    phrase: '关闭空调',
    action: 'ac_off',
    type: 'device',
    enabled: true,
    createTime: '2024-03-20 10:00:00'
  }
]

const mockReminders: VoiceReminder[] = [
  {
    id: 1,
    title: '吃药提醒',
    content: '该吃降压药了',
    time: '08:00',
    repeat: ['mon', 'wed', 'fri'],
    voiceEnabled: true,
    status: 'pending'
  },
  {
    id: 2,
    title: '运动提醒',
    content: '该去散步了',
    time: '16:00',
    repeat: ['everyday'],
    voiceEnabled: true,
    status: 'pending'
  }
]

const mockMessages: VoiceMessage[] = [
  {
    id: 1,
    role: 'assistant',
    content: '您好!我是您的智能语音助手,有什么可以帮您的吗?',
    type: 'text',
    time: '2024-03-20 10:00:00'
  },
  {
    id: 2,
    role: 'user',
    content: '帮我打开电视',
    type: 'voice',
    time: '2024-03-20 10:01:00',
    audioUrl: 'https://example.com/audio/1.mp3'
  }
]

// Mock API endpoints
export const assistantApi = {
  // Speech recognition
  async recognize(audio: File): Promise<{
    text: string
    confidence: number
  }> {
    await delay(500)
    return {
      text: '打开电视',
      confidence: 0.95
    }
  },
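
  // Usage sketch (illustrative only, not part of the original module): record a
  // short clip with the browser MediaRecorder API, wrap the resulting Blob in a
  // File, and pass it to recognize(). The file name and MIME type below are
  // assumptions; `stream` would come from
  // navigator.mediaDevices.getUserMedia({ audio: true }).
  //
  //   const recorder = new MediaRecorder(stream)
  //   recorder.ondataavailable = async (e) => {
  //     const clip = new File([e.data], 'clip.webm', { type: 'audio/webm' })
  //     const { text, confidence } = await assistantApi.recognize(clip)
  //     console.log(text, confidence)
  //   }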

  // Speech synthesis (text-to-speech)
  async synthesize(text: string, options: {
    voice: string
    rate: number
    pitch: number
  }): Promise<{
    audioUrl: string
    duration: number
  }> {
    await delay(500)
    return {
      audioUrl: 'https://example.com/tts/1.mp3',
      duration: 3.5
    }
  },

  // Execute a voice command
  async executeCommand(command: string, context: any): Promise<{
    action: string
    status: string
    response: string
  }> {
    await delay(300)
    return {
      action: 'tv_on',
      status: 'success',
      response: '好的,已为您打开电视'
    }
  },

  // Get the voice command list, optionally filtered by type
  async getCommands(type?: string): Promise<VoiceCommand[]> {
    await delay(300)
    return type
      ? mockCommands.filter(cmd => cmd.type === type)
      : mockCommands
  },

  // Add a voice command
  async addCommand(command: Omit<VoiceCommand, 'id' | 'createTime'>): Promise<VoiceCommand> {
    await delay(300)
    const newCommand = {
      id: mockCommands.length + 1,
      createTime: new Date().toISOString(),
      ...command
    }
    mockCommands.push(newCommand)
    return newCommand
  },

  // Update a command's enabled state
  async updateCommandStatus(id: number, enabled: boolean): Promise<void> {
    await delay(200)
    const command = mockCommands.find(c => c.id === id)
    if (command) {
      command.enabled = enabled
    }
  },

  // Get the reminder list
  async getReminders(): Promise<VoiceReminder[]> {
    await delay(300)
    return mockReminders
  },

  // Add a reminder
  async addReminder(reminder: Omit<VoiceReminder, 'id'>): Promise<VoiceReminder> {
    await delay(300)
    const newReminder = {
      id: mockReminders.length + 1,
      ...reminder
    }
    mockReminders.push(newReminder)
    return newReminder
  },

  // Update a reminder's status
  async updateReminderStatus(id: number, status: 'pending' | 'done'): Promise<void> {
    await delay(200)
    const reminder = mockReminders.find(r => r.id === id)
    if (reminder) {
      reminder.status = status
    }
  },

  // Get conversation history (paginated)
  async getMessages(page: number = 1, size: number = 20): Promise<{
    total: number
    records: VoiceMessage[]
  }> {
    await delay(300)
    return {
      total: mockMessages.length,
      records: mockMessages.slice((page - 1) * size, page * size)
    }
  },

  // Send a message
  async sendMessage(message: Omit<VoiceMessage, 'id'>): Promise<VoiceMessage> {
    await delay(300)
    const newMessage = {
      id: mockMessages.length + 1,
      ...message
    }
    mockMessages.push(newMessage)
    return newMessage
  },

  // Update voice assistant settings
  async updateSettings(settings: {
    volume: number
    rate: number
    voice: string
    wakeWord: string
    autoReply: boolean
  }): Promise<void> {
    await delay(300)
    console.log('Update voice settings:', settings)
  }
}
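
// Illustrative end-to-end sketch (not part of the original module): a rough flow
// a chat view might follow — recognize the recorded utterance, log the user's
// turn, execute the matched command, synthesize the reply, and log the
// assistant's turn. The function name and the hard-coded synthesis options are
// assumptions for illustration only.
export async function handleVoiceTurn(audio: File): Promise<string> {
  // Transcribe the recorded clip
  const { text } = await assistantApi.recognize(audio)

  // Store the user's turn in the conversation history
  await assistantApi.sendMessage({
    role: 'user',
    content: text,
    type: 'voice',
    time: new Date().toISOString()
  })

  // Execute the recognized command and get the assistant's textual reply
  const { response } = await assistantApi.executeCommand(text, {})

  // Synthesize the reply so it can be played back to the user
  const { audioUrl } = await assistantApi.synthesize(response, {
    voice: 'default',
    rate: 1,
    pitch: 1
  })

  // Store the assistant's turn as well
  await assistantApi.sendMessage({
    role: 'assistant',
    content: response,
    type: 'voice',
    time: new Date().toISOString(),
    audioUrl
  })

  return response
}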