feat: Add rfid feature and .gitignore file

This commit is contained in:
lmx
2025-11-28 16:25:35 +08:00
parent 818e8c3778
commit ade4b0a1f8
1244 changed files with 342105 additions and 0 deletions

View File

@ -0,0 +1,638 @@
/*****************************************************************
>file name : aispeech_asr.c
>create time : Thu 23 Dec 2021 10:00:58 AM CST
>AISpeech ASR (speech recognition) algorithm platform integration
*****************************************************************/
#include "system/includes.h"
#include "aispeech_asr.h"
#include "smart_voice.h"
#include "voice_mic_data.h"
#include "key_event_deal.h"
#include "vad_mic.h"
#define ASR_FRAME_SAMPLES 160 /*ASR frame length (in samples)*/
#define AISPEECH_ASR_SAMPLE_RATE 16000
struct ais_platform_asr_context {
void *mic;
void *core;
u8 data_enable;
};
extern const int config_aispeech_asr_enable;
static struct ais_platform_asr_context *__this = NULL;
#define ASR_CLK (160 * 1000000L) /*Module run clock (max frequency: 160MHz)*/
#ifdef AUDIO_PCM_DEBUG
//#define AISPEECH_ASR_AUDIO_DUMP
#endif /*AUDIO_PCM_DEBUG*/
#define AISPEECH_VAD_ASR_MODULE
#ifdef AISPEECH_VAD_ASR_MODULE
#include "vad_asr_demo.h"
#endif /*AISPEECH_VAD_ASR_MODULE*/
#if 1
#include "btstack/avctp_user.h"
#include "audio_anc.h"
#include "anc.h"
#endif
#define MIC_CAPTURE_BUF_SIZE (1024*10)
/*--------------------audio dump--------------------------------*/
#ifdef AISPEECH_ASR_AUDIO_DUMP
#include "cqueue.h"
OS_MUTEX catch_queue_amutex;
CQueue aec_uart_catch_queue;
u8 aec_uart_catch_buf[MIC_CAPTURE_BUF_SIZE];
extern int aec_uart_init();
extern int aec_uart_fill(u8 ch, void *buf, u16 size);
extern void aec_uart_write(void);
extern int aec_uart_close(void);
s16 gasr_exportbuf[256];
void audio_asr_export_demo_task(void *param)
{
int ret = 0;
int msg[16];
while (1) {
ret = os_taskq_pend(NULL, msg, ARRAY_SIZE(msg));
if (ret == OS_TASKQ) {
switch (msg[1]) {
case 0x01: {
int rlen = 512; /*braces added: a declaration cannot directly follow a case label in standard C*/
while (LengthOfCQueue(&aec_uart_catch_queue) >= rlen) {
os_mutex_pend(&catch_queue_amutex, 0);
DeCQueue(&aec_uart_catch_queue, gasr_exportbuf, rlen);
os_mutex_post(&catch_queue_amutex);
/* for (int i = 0; i < 256; i++)
{
gasr_exportbuf[i] = i * 110 + 1;
} */
aec_uart_fill(0, gasr_exportbuf, rlen);
aec_uart_fill(1, gasr_exportbuf, rlen);
aec_uart_fill(2, gasr_exportbuf, rlen);
aec_uart_fill(3, gasr_exportbuf, rlen);
aec_uart_fill(4, gasr_exportbuf, rlen);
// putchar('W');
aec_uart_write();
}
break;
}
}
}
}
}
int audio_asr_export_demo_init()
{
int err = 0;
os_mutex_create(&catch_queue_amutex);
InitCQueue(&aec_uart_catch_queue, sizeof(aec_uart_catch_buf), (CQItemType *)aec_uart_catch_buf);
aec_uart_init();
task_create(audio_asr_export_demo_task, NULL, "audio_asr_export_task");
return err;
}
void audio_asr_export_demo_deinit()
{
aec_uart_close();
os_mutex_del(&catch_queue_amutex, 0);
task_kill("audio_asr_export_task");
}
#endif /*AISPEECH_ASR_AUDIO_DUMP*/
/*--------------------audio dump end-------------------------------*/
#ifdef AISPEECH_VAD_ASR_MODULE
#if 1
enum ais_asr_cmdid {
AIS_ASR_CMDID_NULL = 0,
/*"Hey xxx" wake-word family*/
AIS_ASR_CMDID_KEYWORD,
AIS_ASR_CMDID_PLAY_MUSIC, /*play music*/
AIS_ASR_CMDID_STOP_MUSIC, /*stop playback*/
AIS_ASR_CMDID_PAUSE_MUSIC, /*pause playback*/
AIS_ASR_CMDID_VOLUME_UP, /*volume up*/
AIS_ASR_CMDID_VOLUME_DOWN, /*volume down*/
AIS_ASR_CMDID_PREV_SONG, /*previous track*/
AIS_ASR_CMDID_NEXT_SONG, /*next track*/
AIS_ASR_CMDID_CALL_ACTIVE, /*answer call*/
AIS_ASR_CMDID_CALL_HANGUP, /*hang up call*/
AIS_ASR_CMDID_ANC_OFF, /*ANC off*/
AIS_ASR_CMDID_ANC_ON, /*noise-cancelling mode*/
AIS_ASR_CMDID_ANC_TRANSPARENCY, /*transparency mode*/
/*TODO*/
AIS_ASR_CMDID_MAX,
};
typedef struct Input_CMD {
enum ais_asr_cmdid cmdid;
const char *cmd_str;
const char *at_cmd;
} Input_CMD_ST;
Input_CMD_ST global_asr_cmd[] = {
{ AIS_ASR_CMDID_KEYWORD, "xiao ting xiao ting", "AT+XEVENT=MAJORWAKEUP\r"},
{ AIS_ASR_CMDID_KEYWORD, "xiao jie tong xue", "AT+XEVENT=BIXINBIXIN\r"},
{ AIS_ASR_CMDID_STOP_MUSIC, "zan ting bo fang", "AT+XEVENT=ZANTINGBOFANG\r"},
{ AIS_ASR_CMDID_PLAY_MUSIC, "bo fang yin yue", "AT+XEVENT=BOFANGYINYUE\r"},
{ AIS_ASR_CMDID_PLAY_MUSIC, "ji xv bo fang", "AT+XEVENT=BOFANGYINYUE\r"},
{ AIS_ASR_CMDID_NEXT_SONG, "xia yi shou", "AT+XEVENT=XIAYISHOU\r"},
{ AIS_ASR_CMDID_PREV_SONG, "shang yi shou", "AT+XEVENT=SHANGYISHOU\r"},
{ AIS_ASR_CMDID_VOLUME_UP, "zeng da yin liang", "AT+XEVENT=ZENGDAYINLIANG\r"},
{ AIS_ASR_CMDID_VOLUME_UP, "yin liang da yi dian", "AT+XEVENT=ZENGDAYINLIANG\r"},
{ AIS_ASR_CMDID_VOLUME_DOWN, "jian xiao yin liang", "AT+XEVENT=JIANXIAOYINLIANG\r"},
{ AIS_ASR_CMDID_VOLUME_DOWN, "yin liang xiao yi dian", "AT+XEVENT=JIANXIAOYINLIANG\r"},
{ AIS_ASR_CMDID_CALL_ACTIVE, "jie ting dian hua", "AT+XEVENT=JIETINGDIANHUA\r"},
{ AIS_ASR_CMDID_CALL_HANGUP, "gua duan dian hua", "AT+XEVENT=GUADUANDIANHUA\r"},
{ AIS_ASR_CMDID_ANC_ON, "jiang zao mo shi", "AT+XEVENT=QUEDINGQUEDING\r"},
{ AIS_ASR_CMDID_ANC_TRANSPARENCY, "tong tou mo shi", "AT+XEVENT=QUEDINGQUEDING\r"},
{ AIS_ASR_CMDID_ANC_OFF, "guan bi jiang zao", "AT+XEVENT=QUXIAOQUXIAO\r"},
};
#endif
/*
* Algorithm engine output callback
*/
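/*
* Example (illustrative assumption, not taken from the engine headers): the engine is
* expected to post a JSON-style result such as {"rec":"bo fang yin yue", ...}. The
* handler below locates "rec", skips the 6 characters of the rec":" prefix and
* prefix-matches the remaining pinyin text against global_asr_cmd[] to pick a command
* id and the AT event string forwarded through user_send_at_cmd_prepare().
*/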
int aispeech_asr_output_handler(int status, const char *json, int bytes)
{
#if 1
u8 a2dp_state;
u8 call_state;
enum ais_asr_cmdid cmdid = AIS_ASR_CMDID_NULL;
char *cmdstr = strstr(json, "rec");
if (cmdstr != NULL) {
//printf("cmdstr: %s. \n", cmdstr+6);
cmdstr = cmdstr + 6;
for (int i = 0; i < (sizeof(global_asr_cmd) / sizeof(global_asr_cmd[0])); i++) {
if (memcmp(global_asr_cmd[i].cmd_str, cmdstr, strlen(global_asr_cmd[i].cmd_str)) == 0) {
cmdid = global_asr_cmd[i].cmdid;
int ret = user_send_at_cmd_prepare(global_asr_cmd[i].at_cmd, strlen(global_asr_cmd[i].at_cmd));
//printf("user_send_at_cmd_prepare :%s %d \n", global_asr_cmd[i].at_cmd, ret);
break;
}
}
}
printf("asr_post: %s. bytes:%d\n", json, bytes);
os_time_dly(1);
switch (cmdid) {
case AIS_ASR_CMDID_KEYWORD:
//primary wake word:
printf("send SIRI cmd");
user_send_cmd_prepare(USER_CTRL_HFP_GET_SIRI_OPEN, 0, NULL);
break;
case AIS_ASR_CMDID_PLAY_MUSIC:
case AIS_ASR_CMDID_STOP_MUSIC:
case AIS_ASR_CMDID_PAUSE_MUSIC:
call_state = get_call_status();
if ((call_state == BT_CALL_OUTGOING) ||
(call_state == BT_CALL_ALERT)) {
//user_send_cmd_prepare(USER_CTRL_HFP_CALL_HANGUP, 0, NULL);
} else if (call_state == BT_CALL_INCOMING) {
//user_send_cmd_prepare(USER_CTRL_HFP_CALL_ANSWER, 0, NULL);
} else if (call_state == BT_CALL_ACTIVE) {
//user_send_cmd_prepare(USER_CTRL_HFP_CALL_HANGUP, 0, NULL);
} else {
a2dp_state = a2dp_get_status();
if (a2dp_state == BT_MUSIC_STATUS_STARTING) {
if (cmdid == AIS_ASR_CMDID_PAUSE_MUSIC) {
printf("send PAUSE cmd");
user_send_cmd_prepare(USER_CTRL_AVCTP_OPID_PAUSE, 0, NULL);
} else if (cmdid == AIS_ASR_CMDID_STOP_MUSIC) {
printf("send PLAY cmd to STOP");
user_send_cmd_prepare(USER_CTRL_AVCTP_OPID_PLAY, 0, NULL);
}
} else {
if (cmdid == AIS_ASR_CMDID_PLAY_MUSIC) {
printf("send PLAY cmd");
user_send_cmd_prepare(USER_CTRL_AVCTP_OPID_PLAY, 0, NULL);
}
}
}
break;
case AIS_ASR_CMDID_VOLUME_UP:
printf("volume up");
volume_up(4); //music: 0 ~ 16, call: 0 ~ 15, step: 25%
break;
case AIS_ASR_CMDID_VOLUME_DOWN:
printf("volume down");
volume_down(4); //music: 0 ~ 16, call: 0 ~ 15, step: 25%
break;
case AIS_ASR_CMDID_PREV_SONG:
printf("Send PREV cmd");
user_send_cmd_prepare(USER_CTRL_AVCTP_OPID_PREV, 0, NULL);
break;
case AIS_ASR_CMDID_NEXT_SONG:
printf("Send NEXT cmd");
user_send_cmd_prepare(USER_CTRL_AVCTP_OPID_NEXT, 0, NULL);
break;
case AIS_ASR_CMDID_CALL_ACTIVE:
if (get_call_status() == BT_CALL_INCOMING) {
printf("Send ANSWER cmd");
user_send_cmd_prepare(USER_CTRL_HFP_CALL_ANSWER, 0, NULL);
}
break;
case AIS_ASR_CMDID_CALL_HANGUP:
printf("Send HANG UP cmd");
if ((get_call_status() >= BT_CALL_INCOMING) && (get_call_status() <= BT_CALL_ALERT)) {
user_send_cmd_prepare(USER_CTRL_HFP_CALL_HANGUP, 0, NULL);
}
break;
case AIS_ASR_CMDID_ANC_OFF:
printf("Send ANC_OFF cmd");
#if TCFG_AUDIO_ANC_ENABLE
anc_mode_switch(ANC_OFF, 1);
#endif
break;
case AIS_ASR_CMDID_ANC_ON:
printf("Send ANC_ON cmd");
#if TCFG_AUDIO_ANC_ENABLE
anc_mode_switch(ANC_ON, 1);
#endif
break;
case AIS_ASR_CMDID_ANC_TRANSPARENCY:
printf("Send ANC_TRANSPARENCY cmd 11");
#if TCFG_AUDIO_ANC_ENABLE
anc_mode_switch(ANC_TRANSPARENCY, 1);
#endif
break;
case AIS_ASR_CMDID_NULL:
printf("KWS_EVENT_NULL");
break;
default:
break;
}
#endif
return 0;
}
#endif
/*
* Algorithm engine open function
*/
static void *aispeech_asr_core_open(int sample_rate)
{
printf("[%s-%d]-----start\n", __func__, __LINE__);
clk_set("sys", ASR_CLK);
printf("aip_asr----clk %d \n", ASR_CLK / 1000000);
mem_stats();
void *core = NULL;
#ifdef AISPEECH_VAD_ASR_MODULE
core = aispeech_vad_asr_init();
aispeech_asr_register_handler(aispeech_asr_output_handler);
#endif /*AISPEECH_VAD_ASR_MODULE*/
#ifdef AISPEECH_ASR_AUDIO_DUMP
audio_asr_export_demo_init();
#endif /*AISPEECH_ASR_AUDIO_DUMP*/
printf("[%s-%d]-----end\n", __func__, __LINE__);
return core;
}
/*
* Algorithm engine close function
*/
static void aispeech_asr_core_close(void *core)
{
printf("[%s-%d]-----start\n", __func__, __LINE__);
#ifdef AISPEECH_VAD_ASR_MODULE
aispeech_vad_asr_deinit();
#endif /*AISPEECH_VAD_ASR_MODULE*/
#ifdef AISPEECH_ASR_AUDIO_DUMP
audio_asr_export_demo_deinit();
#endif /*AISPEECH_ASR_AUDIO_DUMP*/
printf("[%s-%d]-----end\n", __func__, __LINE__);
}
/*
* Algorithm engine data processing
*/
static u8 asr_maxcosttime = 0;
static u32 asr_meantime = 0;
static u8 asr_printcnt = 0;
static int aispeech_asr_core_data_handler(void *core, void *data, int len)
{
//printf("[%s-%d]-------len %d \n", __func__, __LINE__, len);
printf(".");
int cur_sys_clk = clk_get("sys");
if (cur_sys_clk < ASR_CLK) {
//printf("[%s-%d]------- %d M\n", __func__, __LINE__, cur_sys_clk/1000000);
clk_set("sys", ASR_CLK);
}
#ifdef AISPEECH_VAD_ASR_MODULE
unsigned long start = jiffies_msec();
aispeech_vad_asr_feed((char *)data, len);
unsigned long end = jiffies_msec();
u8 costtim = end - start;
if (costtim > asr_maxcosttime) {
asr_maxcosttime = costtim;
}
asr_meantime += costtim;
if ((++asr_printcnt) >= 200) {
printf("200 ais asr cost %d ms MAX %d \r\n", asr_meantime / asr_printcnt, asr_maxcosttime);
asr_printcnt = 0;
asr_meantime = 0;
}
#endif /*AISPEECH_VAD_ASR_MODULE*/
return 0;
}
/*
*********************************************************************
* aispeech asr data handler
* Description: JieLi audio platform speech recognition data handling
* Arguments : asr - ASR data management context
* Return : 0 - success, non-zero - failure.
* Note(s) : reads one frame of mic data and feeds it to the recognition engine.
*********************************************************************
*/
static int aispeech_asr_data_handler(struct ais_platform_asr_context *asr)
{
int result = 0;
if (!asr->mic) {
return -EINVAL;
}
s16 data[ASR_FRAME_SAMPLES];
int len = voice_mic_data_read(asr->mic, data, sizeof(data));
if (len < sizeof(data)) {
return -EINVAL;
}
if (asr->core) {
result = aispeech_asr_core_data_handler(asr->core, data, sizeof(data));
} else {
printf("[%s-%d]---len %d \n", __func__, __LINE__, len);
}
#ifdef AISPEECH_ASR_AUDIO_DUMP
os_mutex_pend(&catch_queue_amutex, 0);
EnCQueue(&aec_uart_catch_queue, data, len); /*queue the frame just read above; gasr_datatempbuf/rlen are not defined in this function*/
os_mutex_post(&catch_queue_amutex);
os_taskq_post_msg("audio_asr_export_task", 1, 0x01);
#endif /*AISPEECH_ASR_AUDIO_DUMP*/
#if 0 //def AISPEECH_ASR_AUDIO_DUMP
EnCQueue(&aec_uart_catch_queue, gasr_datatempbuf, rlen);
rlen = 512;
while (LengthOfCQueue(&aec_uart_catch_queue) >= rlen) {
DeCQueue(&aec_uart_catch_queue, gasr_datatempbuf, rlen);
/* for (int i = 0; i < 256; i++)
{
gasr_datatempbuf[i] = i * 110 + 1;
} */
aec_uart_fill(0, gasr_datatempbuf, rlen);
aec_uart_fill(1, gasr_datatempbuf, rlen);
aec_uart_fill(2, gasr_datatempbuf, rlen);
aec_uart_fill(3, gasr_datatempbuf, rlen);
aec_uart_fill(4, gasr_datatempbuf, rlen);
putchar('W');
aec_uart_write();
}
#endif
return 0;
}
/*
*********************************************************************
* aispeech_asr core handler
* Description: AISpeech speech recognition processing
* Arguments : priv - ASR private data
* taskq_type - task message type
* msg - message buffer (messages posted by this module)
* Return : ASR_CORE_WAKEUP or ASR_CORE_STANDBY.
* Note(s) : audio platform resource control plus the main ASR recognition engine.
*********************************************************************
*/
int aispeech_asr_core_handler(void *priv, int taskq_type, int *msg)
{
struct ais_platform_asr_context *asr = (struct ais_platform_asr_context *)priv;
int err = ASR_CORE_STANDBY;
if (taskq_type != OS_TASKQ) {
return err;
}
switch (msg[0]) {
case SMART_VOICE_MSG_MIC_OPEN: /*ASR open - open the MIC and the algorithm engine*/
/* msg[1] - MIC data source, msg[2] - audio sample rate, msg[3] - total mic data buffer length*/
if (asr->data_enable == 0) {
asr->mic = voice_mic_data_open(msg[1], msg[3], msg[2]); /*voice_mic_data_open() takes (source, buffer_size, sample_rate); msg[2] is the sample rate and msg[3] the buffer size*/
asr->core = aispeech_asr_core_open(msg[2]);
asr->data_enable = 1;
}
err = ASR_CORE_WAKEUP;
break;
case SMART_VOICE_MSG_SWITCH_SOURCE:
/*Switch the MIC data source here: main system MIC or VAD MIC*/
if (asr->mic) {
voice_mic_data_clear(asr->mic);
voice_mic_data_switch_source(asr->mic, msg[1], msg[2], msg[3]);
}
/* smart_voice_kws_open(sv, msg[4]); */
break;
case SMART_VOICE_MSG_MIC_CLOSE: /*ASR close - close the MIC and the algorithm engine*/
/* msg[1] - semaphore*/
if (asr->data_enable == 1) {
voice_mic_data_close(asr->mic);
asr->mic = NULL;
aispeech_asr_core_close(asr->core);
asr->core = NULL;
asr->data_enable = 0;
}
os_sem_post((OS_SEM *)msg[1]);
break;
case SMART_VOICE_MSG_WAKE:
err = ASR_CORE_WAKEUP;
/*putchar('W');*/
/* voice_mic_data_debug_start(sv); */
asr->data_enable = 1;
break;
case SMART_VOICE_MSG_STANDBY:
asr->data_enable = 0;
if (asr->mic) {
voice_mic_data_clear(asr->mic);
}
/* voice_mic_data_debug_stop(sv); */
break;
case SMART_VOICE_MSG_DMA: /*DMA message from the MIC data path*/
err = ASR_CORE_WAKEUP;
break;
default:
break;
}
if (asr->data_enable) {
err = aispeech_asr_data_handler(asr);
err = err ? ASR_CORE_STANDBY : ASR_CORE_WAKEUP;
}
return err;
}
int __ais_platform_asr_open(u8 mic)
{
if (!config_aispeech_asr_enable) {
return 0;
}
int err = 0;
if (__this) {
smart_voice_core_post_msg(4, SMART_VOICE_MSG_SWITCH_SOURCE, MIC_CAPTURE_BUF_SIZE, AISPEECH_ASR_SAMPLE_RATE, mic);
return 0;
}
struct ais_platform_asr_context *asr = (struct ais_platform_asr_context *)zalloc(sizeof(struct ais_platform_asr_context));
if (!asr) {
return -ENOMEM;
}
err = smart_voice_core_create(asr);
if (err != OS_NO_ERR) {
goto __err;
}
/*
* Post the MIC-open request to the main speech recognition task.
* Since V1.1.0 the low-power VAD MIC is supported; switch to the VAD MIC if needed.
*/
smart_voice_core_post_msg(4, SMART_VOICE_MSG_MIC_OPEN, mic, AISPEECH_ASR_SAMPLE_RATE, MIC_CAPTURE_BUF_SIZE);
__this = asr;
return 0;
__err:
if (asr) {
free(asr);
}
return err;
}
int ais_platform_asr_open(void)
{
/* return __ais_platform_asr_open(VOICE_MCU_MIC); */
return __ais_platform_asr_open(VOICE_VAD_MIC);
}
void ais_platform_asr_close(void)
{
if (!config_aispeech_asr_enable) {
return;
}
if (__this) {
OS_SEM *sem = (OS_SEM *)malloc(sizeof(OS_SEM));
os_sem_create(sem, 0);
smart_voice_core_post_msg(2, SMART_VOICE_MSG_MIC_CLOSE, (int)sem);
os_sem_pend(sem, 0);
free(sem);
smart_voice_core_free();
free(__this);
__this = NULL;
}
}
int audio_ais_platform_asr_init(struct vad_mic_platform_data *mic_data)
{
lp_vad_mic_data_init(mic_data);
/* __ais_platform_asr_open(VOICE_MCU_MIC); */
__ais_platform_asr_open(VOICE_VAD_MIC);
return 0;
}
/*
* Incoming-call KWS keyword recognition
*/
int audio_phone_call_aispeech_asr_start(void)
{
if (__this) {
/*For in-call recognition, switch from the LP VAD MIC to the main system MIC*/
smart_voice_core_post_msg(4, SMART_VOICE_MSG_SWITCH_SOURCE, MIC_CAPTURE_BUF_SIZE, AISPEECH_ASR_SAMPLE_RATE, VOICE_MCU_MIC);
return 0;
}
__ais_platform_asr_open(VOICE_MCU_MIC);
return 0;
}
/*
* Close incoming-call KWS (after the call is answered or rejected)
*/
int audio_phone_call_aispeech_asr_close(void)
{
if (!__this) {
return 0;
}
/*After in-call recognition ends, switch from the main system MIC back to the LP VAD MIC source*/
smart_voice_core_post_msg(4, SMART_VOICE_MSG_SWITCH_SOURCE, MIC_CAPTURE_BUF_SIZE, AISPEECH_ASR_SAMPLE_RATE, VOICE_VAD_MIC);
return 0;
}
static u8 aispeech_asr_core_idle_query()
{
if (__this) {
return !(__this->data_enable);
} else {
return 1;
}
}
REGISTER_LP_TARGET(aispeech_asr_core_lp_target) = {
.name = "aispeech_asr_core",
.is_idle = aispeech_asr_core_idle_query,
};

View File

@ -0,0 +1,21 @@
/*****************************************************************
>file name : aispeech_asr.h
>create time : Thu 23 Dec 2021 11:53:40 AM CST
*****************************************************************/
#ifndef _AISPEECH_ASR_H_
#define _AISPEECH_ASR_H_
#include "media/includes.h"
int ais_platform_asr_open(void);
void ais_platform_asr_close(void);
int aispeech_asr_core_handler(void *priv, int taskq_type, int *msg);
int audio_ais_platform_asr_init(struct vad_mic_platform_data *mic_data);
int audio_phone_call_aispeech_asr_start(void);
int audio_phone_call_aispeech_asr_close(void);
#endif

View File

@ -0,0 +1,405 @@
/*****************************************************************
>file name : jl_asr.c
>create time : Sat 16 Apr 2022 09:46:07 AM CST
*****************************************************************/
#define LOG_TAG "[Smart-Voice]"
#define LOG_INFO_ENABLE
#define LOG_DEBUG_ENABLE
#define LOG_DUMP_ENABLE
#define LOG_ERROR_ENABLE
#define LOG_WARN_ENABLE
#include "smart_voice.h"
#include "app_config.h"
#include "debug.h"
#include "includes.h"
#include "voice_mic_data.h"
#include "vad_mic.h"
#include "kws_event.h"
#include "nn_vad.h"
#include "media/jl_kws.h"
#include "audio_codec_clock.h"
#if TCFG_SMART_VOICE_ENABLE
#define SMART_VOICE_TEST_LISTEN_SOUND 0
#define SMART_VOICE_TEST_PRINT_PCM 0
#define SMART_VOICE_TEST_WRITE_FILE 0
#define SMART_VOICE_DEBUG_KWS_RESULT 0
#define AUDIO_NN_VAD_ENABLE 0
/*
* Audio speech recognition (KWS)
*/
struct smart_voice_context {
u8 kws_model;
void *kws;
void *mic;
void *nn_vad;
#if SMART_VOICE_TEST_WRITE_FILE
void *file;
#endif
#if SMART_VOICE_DEBUG_KWS_RESULT
void *dump_hdl;
#endif
};
extern const int config_jl_audio_kws_enable;
#define SMART_VOICE_SAMPLE_RATE 16000
#define SMART_VOICE_REC_MIC_SECS 8
#define SMART_VOICE_REC_DATA_LEN (SMART_VOICE_SAMPLE_RATE * SMART_VOICE_REC_MIC_SECS * 2)
#define SMART_VOICE_KWS_FRAME_LEN (320)
#if SMART_VOICE_TEST_WRITE_FILE
#define VOICE_DATA_BUFFER_SIZE 16 * 1024
#elif SMART_VOICE_TEST_PRINT_PCM
#define VOICE_DATA_BUFFER_SIZE SMART_VOICE_REC_DATA_LEN
#else
#define VOICE_DATA_BUFFER_SIZE 2 * 1024
#endif
#if ((defined TCFG_AUDIO_DATA_EXPORT_ENABLE && TCFG_AUDIO_DATA_EXPORT_ENABLE) || SMART_VOICE_TEST_WRITE_FILE)
#define CONFIG_VAD_KWS_DETECT_ENABLE 0
#else
#define CONFIG_VAD_KWS_DETECT_ENABLE 1
#endif
static struct smart_voice_context *this_sv = NULL;
static u8 volatile smart_voice_wakeup = 0;
#if SMART_VOICE_TEST_LISTEN_SOUND
#include "audio_config.h"
extern struct audio_dac_hdl dac_hdl;
#endif
static inline void smart_voice_data_listen_sound(void *data, int len)
{
#if SMART_VOICE_TEST_LISTEN_SOUND
if (audio_dac_is_working(&dac_hdl)) {
audio_dac_write(&dac_hdl, data, len);
}
#endif
}
static inline void smart_voice_data_write_file(struct smart_voice_context *sv, void *data, int len)
{
#if SMART_VOICE_TEST_WRITE_FILE
if (sv->file) {
fwrite(sv->file, data, len); /*was sizeof(data), which is only the size of the pointer*/
}
#endif
}
static void voice_mic_data_debug_stop(struct smart_voice_context *sv)
{
#if SMART_VOICE_TEST_PRINT_PCM
if (sv->mic) {
voice_mic_data_dump(sv->mic);
}
#endif
#if SMART_VOICE_TEST_WRITE_FILE
if (sv->file) {
fclose(sv->file);
sv->file = NULL;
}
#endif
}
static void voice_mic_data_debug_start(struct smart_voice_context *sv)
{
#if SMART_VOICE_TEST_WRITE_FILE
sv->file = fopen("storage/sd0/C/AudioVAD/vad***.raw", "w+");
if (!sv->file) {
printf("Open file failed, can not test.\n");
}
#endif
}
static void smart_voice_kws_close(struct smart_voice_context *sv)
{
#if AUDIO_NN_VAD_ENABLE
if (sv->nn_vad) {
audio_nn_vad_close(sv->nn_vad);
sv->nn_vad = NULL;
}
#else
if (sv->kws) {
audio_kws_close(sv->kws);
sv->kws = NULL;
}
#endif
}
static void smart_voice_kws_open(struct smart_voice_context *sv, u8 model)
{
#if AUDIO_NN_VAD_ENABLE
if (sv->nn_vad) {
return;
}
sv->nn_vad = audio_nn_vad_open();
#else
if (sv->kws) {
if (sv->kws_model == model) {
return;
}
smart_voice_kws_close(sv);
}
sv->kws_model = model;
sv->kws = audio_kws_open(model, NULL);//kws_model_files[model]);
#endif
}
/*
* KWS processing for speech recognition
*/
static int smart_voice_data_handler(struct smart_voice_context *sv)
{
int result = 0;
if (!config_jl_audio_kws_enable) {
return 0;
}
#if SMART_VOICE_TEST_PRINT_PCM
putchar('*');
return 1;
#endif
if (!sv->mic) {
return -EINVAL;
}
s16 data[SMART_VOICE_KWS_FRAME_LEN / 2];
int rlen = voice_mic_data_read(sv->mic, data, SMART_VOICE_KWS_FRAME_LEN);
if (rlen < SMART_VOICE_KWS_FRAME_LEN) {
return -EINVAL;
}
if (sv->nn_vad) {
#if AUDIO_NN_VAD_ENABLE
audio_nn_vad_data_handler(sv->nn_vad, data, sizeof(data));
#endif
}
if (sv->kws) {
/*putchar('*');*/
smart_voice_data_listen_sound(data, sizeof(data));
smart_voice_data_write_file(sv, data, sizeof(data));
#if (CONFIG_VAD_KWS_DETECT_ENABLE)
result = audio_kws_detect_handler(sv->kws, (void *)data, sizeof(data));
if (result > 1) {
printf("result : %d\n", result);
}
smart_voice_kws_event_handler(sv->kws_model, result);
#endif
#if SMART_VOICE_DEBUG_KWS_RESULT
smart_voice_kws_dump_result_add(sv->dump_hdl, sv->kws_model, result);
#endif
}
return 0;
}
int smart_voice_core_handler(void *priv, int taskq_type, int *msg)
{
int err = ASR_CORE_STANDBY;
struct smart_voice_context *sv = (struct smart_voice_context *)priv;
if (taskq_type == OS_TASKQ) {
switch (msg[0]) {
case SMART_VOICE_MSG_MIC_OPEN:
sv->mic = voice_mic_data_open(msg[1], msg[2], msg[3]);
if (!sv->mic) {
printf("VAD mic open failed.\n");
}
smart_voice_kws_open(sv, msg[4]);
break;
case SMART_VOICE_MSG_SWITCH_SOURCE:
if (sv->mic) {
voice_mic_data_clear(sv->mic);
voice_mic_data_switch_source(sv->mic, msg[1], msg[2], msg[3]);
}
smart_voice_kws_open(sv, msg[4]);
break;
case SMART_VOICE_MSG_MIC_CLOSE:
smart_voice_kws_close(sv);
voice_mic_data_close(sv->mic);
sv->mic = NULL;
os_sem_post((OS_SEM *)msg[1]);
break;
case SMART_VOICE_MSG_WAKE:
err = ASR_CORE_WAKEUP;
/*putchar('W');*/
voice_mic_data_debug_start(sv);
smart_voice_wakeup = 1;
break;
case SMART_VOICE_MSG_STANDBY:
smart_voice_wakeup = 0;
if (sv->mic) {
voice_mic_data_clear(sv->mic);
}
voice_mic_data_debug_stop(sv);
break;
case SMART_VOICE_MSG_DMA:
err = ASR_CORE_WAKEUP;
msg[0] = (int)audio_codec_clock_check;
msg[1] = 0;
os_taskq_post_type("app_core", Q_CALLBACK, 2, msg);
/*audio_codec_clock_check();*/
break;
}
}
if (smart_voice_wakeup) {
err = smart_voice_data_handler(sv);
err = err ? ASR_CORE_STANDBY : ASR_CORE_WAKEUP;
}
return err;
}
int audio_smart_voice_detect_create(u8 model, u8 mic, int buffer_size)
{
int err = 0;
if (!config_jl_audio_kws_enable) {
return 0;
}
if (this_sv) {
smart_voice_core_post_msg(5, SMART_VOICE_MSG_SWITCH_SOURCE, mic, buffer_size, SMART_VOICE_SAMPLE_RATE, model);
return 0;
}
struct smart_voice_context *sv = (struct smart_voice_context *)zalloc(sizeof(struct smart_voice_context));
if (!sv) {
goto __err;
}
smart_voice_wakeup = 0;
err = smart_voice_core_create(sv);
if (err) {
goto __err;
}
audio_codec_clock_set(SMART_VOICE_MODE, AUDIO_CODING_PCM, 0);//TODO: set the VAD background clock from the app_core flow for now to avoid a clock-setting critical section; move it into the task for dynamic setting once clk_set is improved
smart_voice_core_post_msg(5, SMART_VOICE_MSG_MIC_OPEN, mic, buffer_size, SMART_VOICE_SAMPLE_RATE, model);
#if SMART_VOICE_DEBUG_KWS_RESULT
if (!sv->dump_hdl) {
sv->dump_hdl = smart_voice_kws_dump_open(2000);
}
#endif
this_sv = sv;
return 0;
__err:
if (sv) {
free(sv);
}
return err;
}
void audio_smart_voice_detect_close(void)
{
if (config_jl_audio_kws_enable && this_sv) {
#if SMART_VOICE_DEBUG_KWS_RESULT
smart_voice_kws_dump_close(this_sv->dump_hdl);
#endif
OS_SEM *sem = (OS_SEM *)malloc(sizeof(OS_SEM));
os_sem_create(sem, 0);
smart_voice_core_post_msg(2, SMART_VOICE_MSG_MIC_CLOSE, (int)sem);
os_sem_pend(sem, 0);
free(sem);
audio_codec_clock_del(SMART_VOICE_MODE);
smart_voice_core_free();
free(this_sv);
this_sv = NULL;
}
}
static void __audio_smart_voice_detect_open(u8 mic, u8 model)
{
#if SMART_VOICE_TEST_WRITE_FILE
extern void force_set_sd_online(char *sdx);
force_set_sd_online("sd0");
void *mnt = mount("sd0", "storage/sd0", "fat", 3, NULL);
if (!mnt) {
printf("sd0 mount fat failed.\n");
}
#endif
audio_smart_voice_detect_create(model, mic, VOICE_DATA_BUFFER_SIZE);
}
void audio_smart_voice_detect_open(u8 model)
{
return __audio_smart_voice_detect_open(model == JL_KWS_COMMAND_KEYWORD ? VOICE_VAD_MIC : VOICE_MCU_MIC, model);
}
int audio_smart_voice_detect_init(struct vad_mic_platform_data *mic_data)
{
lp_vad_mic_data_init(mic_data);
__audio_smart_voice_detect_open(VOICE_VAD_MIC, JL_KWS_COMMAND_KEYWORD);
return 0;
}
/*
* Incoming-call KWS keyword recognition
*/
int audio_phone_call_kws_start(void)
{
if (this_sv) {
/*For in-call recognition, switch from the LP VAD MIC to the main system MIC*/
smart_voice_core_post_msg(5, SMART_VOICE_MSG_SWITCH_SOURCE, VOICE_MCU_MIC, VOICE_DATA_BUFFER_SIZE, SMART_VOICE_SAMPLE_RATE, JL_KWS_CALL_KEYWORD);
return 0;
}
__audio_smart_voice_detect_open(VOICE_MCU_MIC, JL_KWS_CALL_KEYWORD);
return 0;
}
/*
* Close incoming-call KWS (after the call is answered or rejected)
*/
int audio_phone_call_kws_close(void)
{
if (!this_sv) {
return 0;
}
/*After in-call recognition ends, switch from the main system MIC back to the LP VAD MIC source*/
smart_voice_core_post_msg(5, SMART_VOICE_MSG_SWITCH_SOURCE, VOICE_VAD_MIC, VOICE_DATA_BUFFER_SIZE, SMART_VOICE_SAMPLE_RATE, JL_KWS_COMMAND_KEYWORD);
return 0;
}
static u8 smart_voice_idle_query(void)
{
return !smart_voice_wakeup;
}
static enum LOW_POWER_LEVEL smart_voice_level_query(void)
{
return LOW_POWER_MODE_SLEEP;
}
REGISTER_LP_TARGET(smart_voice_lp_target) = {
.name = "smart_voice",
.level = smart_voice_level_query,
.is_idle = smart_voice_idle_query,
};
void audio_vad_test(void)
{
#if SMART_VOICE_TEST_LISTEN_SOUND
app_audio_state_switch(APP_AUDIO_STATE_MUSIC, get_max_sys_vol());
audio_dac_set_sample_rate(&dac_hdl, SMART_VOICE_SAMPLE_RATE);
audio_dac_set_volume(&dac_hdl, 15);
audio_dac_start(&dac_hdl);
audio_vad_m2p_event_post(M2P_VAD_CMD_TEST);
while (1) {
os_time_dly(10);
}
#endif
}
#endif

View File

@ -0,0 +1,143 @@
/*****************************************************************
>file name : kws_event.c
>author : lichao
>create time : Mon 01 Nov 2021 11:34:00 AM CST
*****************************************************************/
#include "system/includes.h"
#include "kws_event.h"
#include "jl_kws.h"
extern int config_audio_kws_event_enable;
static const int kws_wake_word_event[] = {
KWS_EVENT_NULL,
KWS_EVENT_HEY_KEYWORD,
};
static const int kws_multi_command_event[] = {
KWS_EVENT_NULL,
KWS_EVENT_NULL,
KWS_EVENT_XIAOJIE,
KWS_EVENT_XIAOJIE,
KWS_EVENT_PLAY_MUSIC,
KWS_EVENT_STOP_MUSIC,
KWS_EVENT_PAUSE_MUSIC,
KWS_EVENT_VOLUME_UP,
KWS_EVENT_VOLUME_DOWN,
KWS_EVENT_PREV_SONG,
KWS_EVENT_NEXT_SONG,
KWS_EVENT_ANC_ON,
KWS_EVENT_ANC_OFF,
KWS_EVENT_TRANSARENT_ON,
};
static const int kws_call_command_event[] = {
KWS_EVENT_NULL,
KWS_EVENT_NULL,
KWS_EVENT_CALL_ACTIVE,
KWS_EVENT_CALL_HANGUP,
};
static const int *kws_model_events[3] = {
kws_wake_word_event,
kws_multi_command_event,
kws_call_command_event,
};
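/*
* Example (illustrative): the handler below indexes this table as
* kws_model_events[model][kws]; assuming the JL_KWS_* model ids from media/jl_kws.h
* enumerate the tables in the order wake-word / multi-command / call-command, a hit
* with model == 1 and result index 4 maps to kws_multi_command_event[4], i.e.
* KWS_EVENT_PLAY_MUSIC, which smart_voice_kws_event_handler() then delivers as a
* SYS_KEY_EVENT with type KEY_DRIVER_TYPE_VOICE.
*/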
int smart_voice_kws_event_handler(u8 model, int kws)
{
if (!config_audio_kws_event_enable || kws < 0) {
return 0;
}
int event = KWS_EVENT_NULL;
struct sys_event e;
event = kws_model_events[model][kws];
if (event == KWS_EVENT_NULL) {
return -EINVAL;
}
e.type = SYS_KEY_EVENT;
e.u.key.event = event;
e.u.key.value = 'V';
e.u.key.type = KEY_DRIVER_TYPE_VOICE;//distinguishes the key driver type
sys_event_notify(&e);
return 0;
}
static const char *kws_dump_words[] = {
"no words",
"hey siri",
"xiao jie",
"xiao du",
"bo fang yin yue",
"ting zhi bo fang",
"zan ting bo fang",
"zeng da yin liang",
"jian xiao yin liang",
"shang yi shou",
"xia yi shou",
"jie ting dian hua",
"gua duan dian hua",
"anc on",
"anc off",
"transarent on",
};
struct kws_result_context {
u16 timer;
u32 result[0];
};
static void smart_voice_kws_dump_timer(void *arg)
{
struct kws_result_context *ctx = (struct kws_result_context *)arg;
int i = 0;
int kws_num = ARRAY_SIZE(kws_dump_words);
printf("\n===============================================\nResults:\n");
for (i = 1; i < kws_num; i++) {
printf("%s : %u\n", kws_dump_words[i], ctx->result[i]);
}
printf("\n===============================================\n");
}
void *smart_voice_kws_dump_open(int period_time)
{
if (!config_audio_kws_event_enable) {
return NULL;
}
struct kws_result_context *ctx = NULL;
ctx = zalloc(sizeof(struct kws_result_context) + (sizeof(u32) * ARRAY_SIZE(kws_dump_words)));
if (ctx) {
ctx->timer = sys_timer_add(ctx, smart_voice_kws_dump_timer, period_time);
}
return ctx;
}
void smart_voice_kws_dump_result_add(void *_ctx, u8 model, int kws)
{
if (!config_audio_kws_event_enable || kws < 0) {
return;
}
struct kws_result_context *ctx = (struct kws_result_context *)_ctx;
int event = kws_model_events[model][kws];
ctx->result[event]++;
}
void smart_voice_kws_dump_close(void *_ctx)
{
struct kws_result_context *ctx = (struct kws_result_context *)_ctx;
if (config_audio_kws_event_enable) {
if (ctx->timer) {
sys_timer_del(ctx->timer);
free(ctx);
}
}
}

View File

@ -0,0 +1,21 @@
/*****************************************************************
>file name : kws_event.h
>author : lichao
>create time : Mon 01 Nov 2021 05:08:48 PM CST
*****************************************************************/
#ifndef _SMART_VOICE_KWS_EVENT_H_
#define _SMART_VOICE_KWS_EVENT_H_
#include "media/kws_event.h"
int smart_voice_kws_event_handler(u8 model, int kws);
void *smart_voice_kws_dump_open(int period_time);
void smart_voice_kws_dump_result_add(void *_ctx, u8 model, int kws);
void smart_voice_kws_dump_close(void *_ctx);
#endif

View File

@ -0,0 +1,69 @@
/*****************************************************************
>file name : nn_vad.c
>create time : Thu 16 Dec 2021 07:19:26 PM CST
*****************************************************************/
#include "typedef.h"
#include "media/tech_lib/jlsp_vad.h"
struct audio_nn_vad_context {
void *algo;
u8 *share_buffer;
u8 algo_mem[0];
};
void *audio_nn_vad_open(void)
{
struct audio_nn_vad_context *ctx;
int model_size = 0;
int share_size = 10 * 1024;
int lib_heap_size = JLSP_vad_get_heap_size(NULL, &model_size);
ctx = (struct audio_nn_vad_context *)zalloc(sizeof(struct audio_nn_vad_context) + lib_heap_size + share_size);
if (!ctx) {
return NULL;
}
ctx->share_buffer = ctx->algo_mem + lib_heap_size;
ctx->algo = (void *)JLSP_vad_init((char *)ctx->algo_mem,
lib_heap_size,
(char *)ctx->share_buffer,
share_size,
NULL,
model_size);
if (!ctx->algo) {
goto err;
}
JLSP_vad_reset(ctx->algo);
return ctx;
err:
if (ctx) {
free(ctx);
}
return NULL;
}
int audio_nn_vad_data_handler(void *vad, void *data, int len)
{
struct audio_nn_vad_context *ctx = (struct audio_nn_vad_context *)vad;
int out_flag;
if (ctx) {
return JLSP_vad_process(ctx->algo, data, &out_flag);
}
return 0;
}
void audio_nn_vad_close(void *vad)
{
struct audio_nn_vad_context *ctx = (struct audio_nn_vad_context *)vad;
if (ctx->algo) {
JLSP_vad_free(ctx->algo);
}
free(ctx);
}

View File

@ -0,0 +1,15 @@
/*****************************************************************
>file name : nn_vad.h
>create time : Fri 17 Dec 2021 02:36:53 PM CST
*****************************************************************/
#ifndef _AUDIO_NN_VAD_H_
#define _AUDIO_NN_VAD_H_
void *audio_nn_vad_open(void);
int audio_nn_vad_data_handler(void *vad, void *data, int len);
void audio_nn_vad_close(void *vad);
#endif

View File

@ -0,0 +1,95 @@
/*****************************************************************
>file name : smart_voice.h
>create time : Thu 17 Jun 2021 02:07:32 PM CST
*****************************************************************/
#ifndef _SMART_VOICE_H_
#define _SMART_VOICE_H_
#include "media/includes.h"
#define SMART_VOICE_MSG_WAKE 0 /*wake up*/
#define SMART_VOICE_MSG_STANDBY 1 /*standby*/
#define SMART_VOICE_MSG_DMA 2 /*voice data DMA transfer*/
#define SMART_VOICE_MSG_MIC_OPEN 3 /*MIC open*/
#define SMART_VOICE_MSG_SWITCH_SOURCE 4 /*MIC source switch*/
#define SMART_VOICE_MSG_MIC_CLOSE 5 /*MIC close*/
#define ASR_CORE "audio_vad"
#define ASR_CORE_WAKEUP 0
#define ASR_CORE_STANDBY 1
/*
*********************************************************************
* audio smart voice detect create
* Description: create the smart voice detection engine
* Arguments : model - KWS model
* mic - MIC selection: low-power VAD MIC or main MCU MIC
* buffer_size - MIC DMA data buffer size
* Return : 0 - created successfully, non-zero - failure.
* Note(s) : None.
*********************************************************************
*/
int audio_smart_voice_detect_create(u8 model, u8 mic, int buffer_size);
/*
*********************************************************************
* audio smart voice detect open
* Description: smart voice detection open interface
* Arguments : model - KWS model; the MIC is chosen internally (VAD MIC for
* command keywords, main MCU MIC for call keywords).
* Return : void.
* Note(s) : None.
*********************************************************************
*/
void audio_smart_voice_detect_open(u8 model);
/*
*********************************************************************
* audio smart voice detect close
* Description: smart voice detection close interface
* Arguments : void.
* Return : void.
* Note(s) : releases all engine resources, regardless of which mic is in use.
*********************************************************************
*/
void audio_smart_voice_detect_close(void);
/*
*********************************************************************
* audio smart voice detect init
* Description: smart voice detection configuration init
* Arguments : mic_data - P11 mic initialization configuration.
* Return : 0 - success.
* Note(s) : None.
*********************************************************************
*/
int audio_smart_voice_detect_init(struct vad_mic_platform_data *mic_data);
/*
*********************************************************************
* audio phone call kws start
* Description: start incoming-call keyword recognition
* Arguments : void.
* Return : 0 - success, non-zero - failure.
* Note(s) : switches from the low-power VAD mic to the main MCU mic used for calls.
*********************************************************************
*/
int audio_phone_call_kws_start(void);
/*
*********************************************************************
* audio phone call kws close
* Description: close incoming-call keyword recognition
* Arguments : void.
* Return : 0 - success, non-zero - error.
* Note(s) : closes call keyword recognition, typically after the call is answered, hung up or rejected.
*********************************************************************
*/
int audio_phone_call_kws_close(void);
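/*
 *********************************************************************
 * Usage sketch (illustrative, not part of the API):
 *
 * // at board init, with a board-specific vad_mic_platform_data:
 * audio_smart_voice_detect_init(&vad_mic_data); // opens the VAD MIC + command-keyword KWS
 *
 * // on an incoming call: switch to the main MIC and the call-keyword model
 * audio_phone_call_kws_start();
 * // after answer / hang-up / reject: back to the low-power VAD MIC
 * audio_phone_call_kws_close();
 *
 * // to release all engine resources
 * audio_smart_voice_detect_close();
 *
 * vad_mic_data above is a placeholder for the project's MIC configuration.
 *********************************************************************
 */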
#define smart_voice_core_post_msg(num, ...) os_taskq_post_msg(ASR_CORE, num, __VA_ARGS__)
int smart_voice_core_create(void *priv);
int smart_voice_core_free(void);
#endif

View File

@ -0,0 +1,35 @@
/*****************************************************************
>file name : smart_voice_config.c
>author : lichao
>create time : Mon 01 Nov 2021 11:18:03 AM CST
*****************************************************************/
#include "typedef.h"
#include "app_config.h"
#if (TCFG_SMART_VOICE_ENABLE)
const int config_lp_vad_enable = 1;
const int config_jl_audio_kws_enable = 1; /*KWS enable*/
const int config_aispeech_asr_enable = 0;
const int config_user_asr_enable = 0;
const int config_audio_kws_event_enable = 1;
#elif ((defined TCFG_AUDIO_ASR_DEVELOP) && (TCFG_AUDIO_ASR_DEVELOP == ASR_CFG_AIS))
const int config_lp_vad_enable = 1;
const int config_jl_audio_kws_enable = 0;
const int config_aispeech_asr_enable = 1;
const int config_user_asr_enable = 0;
const int config_audio_kws_event_enable = 1;
#elif ((defined TCFG_AUDIO_ASR_DEVELOP) && (TCFG_AUDIO_ASR_DEVELOP == ASR_CFG_USER_DEFINED))
const int config_lp_vad_enable = 1;
const int config_jl_audio_kws_enable = 0;
const int config_aispeech_asr_enable = 0;
const int config_user_asr_enable = 1;
const int config_audio_kws_event_enable = 1;
#else
const int config_lp_vad_enable = 0;
const int config_jl_audio_kws_enable = 0;
const int config_aispeech_asr_enable = 0;
const int config_user_asr_enable = 0;
const int config_audio_kws_event_enable = 0;
#endif
const int config_audio_nn_vad_enable = 0;

View File

@ -0,0 +1,91 @@
/*****************************************************************
>file name : smart_voice_core.c
>author : lichao
>create time : Mon 01 Nov 2021 11:35:15 AM CST
*****************************************************************/
#include "smart_voice.h"
#include "vad_mic.h"
#include "includes.h"
#include "os/os_api.h"
/*#include "jl_kws_platform.h"*/
/*#include "aispeech_platform.h"*/
/*#include "user_platform.h"*/
/*****************************************************************
* **Algorithm platform integration notes**
*
* 1. TCFG_SMART_VOICE_ENABLE selects the built-in VAD + KWS keyword
* recognition algorithm; outside of call mode it uses the low-power
* VAD MIC as the voice data MIC by default.
*
* 2. Use TCFG_AUDIO_ASR_DEVELOP to select the corresponding algorithm
* development platform and integrate the algorithm on top of this
* framework.
*
* 3. To use the built-in low-power VAD as the wake-up source and voice
* data MIC, three steps are required:
* 1) set TCFG_VAD_LOWPOWER_CLOCK to VAD_CLOCK_USE_PMU_STD12M
* 2) call lp_vad_mic_data_init() to initialize the VAD MIC configuration
* 3) pass VOICE_VAD_MIC as the data source when calling voice_mic_data_open()
*
*****************************************************************/
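/*
* Minimal usage sketch for the steps above (kept inside #if 0; the board-level
* vad_mic_platform_data instance below is a placeholder, not a real configuration):
*/
#if 0
static struct vad_mic_platform_data demo_vad_mic_data; /*hypothetical board MIC config*/
static void smart_voice_platform_demo(void)
{
/*For the built-in VAD + KWS platform this single call covers steps 2) and 3):
it runs lp_vad_mic_data_init() and then opens the data source as VOICE_VAD_MIC.*/
audio_smart_voice_detect_init(&demo_vad_mic_data);
}
#endif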
extern const int config_jl_audio_kws_enable; /*KWS enable*/
extern const int config_aispeech_asr_enable;
extern const int config_user_asr_enable;
extern const int config_lp_vad_enable;
extern int smart_voice_core_handler(void *priv, int taskq_type, int *msg);
extern int aispeech_asr_core_handler(void *priv, int taskq_type, int *msg);
extern int user_asr_core_handler(void *priv, int taskq_type, int *msg);
/*
* Smart wake-up and speech recognition processing task
*/
static void smart_voice_core_task(void *arg)
{
int msg[16];
int res;
u8 pend_taskq = 1;
int err = 0;
/*struct smart_voice_context *sv = (struct smart_voice_context *)arg;*/
if (config_lp_vad_enable) {
audio_vad_clock_trim();
}
while (1) {
if (pend_taskq) {
res = os_taskq_pend("taskq", msg, ARRAY_SIZE(msg));
} else {
res = os_taskq_accept(ARRAY_SIZE(msg), msg);
}
err = ASR_CORE_STANDBY;
if (config_jl_audio_kws_enable) {
err = smart_voice_core_handler(arg, res, &msg[1]);
}
if (config_aispeech_asr_enable) {
err = aispeech_asr_core_handler(arg, res, &msg[1]);
}
if (config_user_asr_enable) {
err = user_asr_core_handler(arg, res, &msg[1]);
}
pend_taskq = err ? 1 : 0;
}
}
int smart_voice_core_create(void *priv)
{
int err = task_create(smart_voice_core_task, priv, ASR_CORE);
if (err != OS_NO_ERR) {
return -EINVAL;
}
return 0;
}
int smart_voice_core_free(void)
{
task_kill(ASR_CORE);
return 0;
}

View File

@ -0,0 +1,178 @@
/*****************************************************************
>file name : user_asr.c
>create time : Thu 23 Dec 2021 10:00:58 AM CST
>User-defined ASR algorithm platform integration
*****************************************************************/
#include "system/includes.h"
#include "user_asr.h"
#include "smart_voice.h"
#include "voice_mic_data.h"
#define ASR_FRAME_SAMPLES 160 /*ASR frame length (in samples)*/
struct user_platform_asr_context {
void *mic;
void *core;
u8 data_enable;
};
extern const int config_user_asr_enable;
static struct user_platform_asr_context *__this = NULL;
/*
* Algorithm engine open function
*/
static void *user_asr_core_open(int sample_rate)
{
return NULL;
}
/*
* Algorithm engine close function
*/
static void user_asr_core_close(void *core)
{
}
/*
* Algorithm engine data processing
*/
static int user_asr_core_data_handler(void *core, void *data, int len)
{
return 0;
}
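/*
* Illustrative only: the my_asr_* calls below are hypothetical placeholders for a
* third-party engine (they do not exist in this SDK). They show how the three stubs
* above could be filled in: create the engine in user_asr_core_open(), feed one
* 16kHz mono frame per call in user_asr_core_data_handler(), and free it in
* user_asr_core_close().
*/
#if 0
static void *user_asr_core_open(int sample_rate)
{
return my_asr_engine_create(sample_rate); /*hypothetical engine constructor*/
}
static void user_asr_core_close(void *core)
{
my_asr_engine_destroy(core); /*hypothetical engine destructor*/
}
static int user_asr_core_data_handler(void *core, void *data, int len)
{
return my_asr_engine_feed(core, (s16 *)data, len / 2); /*hypothetical: len bytes -> len/2 samples*/
}
#endif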
/*
*********************************************************************
* user asr data handler
* Description: user algorithm platform speech recognition data handling
* Arguments : asr - ASR data management context
* Return : 0 - success, non-zero - failure.
* Note(s) : reads one frame of mic data and feeds it to the recognition engine.
*********************************************************************
*/
static int user_asr_data_handler(struct user_platform_asr_context *asr)
{
int result = 0;
if (!asr->mic) {
return -EINVAL;
}
s16 data[ASR_FRAME_SAMPLES];
int len = voice_mic_data_read(asr->mic, data, sizeof(data));
if (len < sizeof(data)) {
return -EINVAL;
}
if (asr->core) {
result = user_asr_core_data_handler(asr->core, data, sizeof(data));
}
return 0;
}
/*
*********************************************************************
* user_asr core handler
* Description: user-defined speech recognition processing
* Arguments : priv - ASR private data
* taskq_type - task message type
* msg - message buffer (messages posted by this module)
* Return : ASR_CORE_WAKEUP or ASR_CORE_STANDBY.
* Note(s) : audio platform resource control plus the main ASR recognition engine.
*********************************************************************
*/
int user_asr_core_handler(void *priv, int taskq_type, int *msg)
{
struct user_platform_asr_context *asr = (struct user_platform_asr_context *)priv;
int err = ASR_CORE_STANDBY;
if (taskq_type != OS_TASKQ) {
return err;
}
switch (msg[0]) {
case SMART_VOICE_MSG_MIC_OPEN: /*ASR open - open the MIC and the algorithm engine*/
/* msg[1] - MIC data source, msg[2] - audio sample rate, msg[3] - total mic data buffer length*/
asr->mic = voice_mic_data_open(msg[1], msg[3], msg[2]); /*voice_mic_data_open() takes (source, buffer_size, sample_rate); msg[2] is the sample rate and msg[3] the buffer size*/
asr->core = user_asr_core_open(msg[2]);
asr->data_enable = 1;
err = ASR_CORE_WAKEUP;
break;
case SMART_VOICE_MSG_SWITCH_SOURCE:
/*Switch the MIC data source here: main system MIC or VAD MIC*/
break;
case SMART_VOICE_MSG_MIC_CLOSE: /*ASR close - close the MIC and the algorithm engine*/
/* msg[1] - semaphore*/
voice_mic_data_close(asr->mic);
asr->mic = NULL;
user_asr_core_close(asr->core);
asr->core = NULL;
asr->data_enable = 0;
os_sem_post((OS_SEM *)msg[1]);
break;
case SMART_VOICE_MSG_DMA: /*DMA message from the MIC data path*/
err = ASR_CORE_WAKEUP;
break;
default:
break;
}
if (asr->data_enable) {
err = user_asr_data_handler(asr);
err = err ? ASR_CORE_STANDBY : ASR_CORE_WAKEUP;
}
return err;
}
int user_platform_asr_open(void)
{
if (!config_user_asr_enable) {
return 0;
}
int err = 0;
struct user_platform_asr_context *asr = (struct user_platform_asr_context *)zalloc(sizeof(struct user_platform_asr_context));
if (!asr) {
return -ENOMEM;
}
err = smart_voice_core_create(asr);
if (err != OS_NO_ERR) {
goto __err;
}
/*
* Post the MIC-open request to the main speech recognition task.
* Since V1.1.0 the low-power VAD MIC is supported; switch to the VAD MIC if needed.
*/
smart_voice_core_post_msg(4, SMART_VOICE_MSG_MIC_OPEN, VOICE_VAD_MIC, 16000, 4096);
__this = asr;
return 0;
__err:
if (asr) {
free(asr);
}
return err;
}
void user_platform_asr_close(void)
{
if (!config_user_asr_enable) {
return;
}
if (__this) {
OS_SEM *sem = (OS_SEM *)malloc(sizeof(OS_SEM));
os_sem_create(sem, 0);
smart_voice_core_post_msg(2, SMART_VOICE_MSG_MIC_CLOSE, (int)sem);
os_sem_pend(sem, 0);
free(sem);
smart_voice_core_free();
free(__this);
__this = NULL;
}
}

View File

@ -0,0 +1,15 @@
/*****************************************************************
>file name : user_asr.h
>create time : Thu 23 Dec 2021 11:53:40 AM CST
*****************************************************************/
#ifndef _USER_ASR_H_
#define _USER_ASR_H_
int user_platform_asr_open(void);
void user_platform_asr_close(void);
int user_asr_core_handler(void *priv, int taskq_type, int *msg);
#endif

View File

@ -0,0 +1,213 @@
/*****************************************************************
>file name : vco_clock_trim.c
>author : IC lishanliao
>create time : Mon 22 Nov 2021 02:44:34 PM CST
>Description : trim the VCO clock so that it runs close to 29MHz,
which makes the voltage corresponding to each ADC sample more accurate
*****************************************************************/
#include "vad_mic.h"
#include "voice_mic_data.h"
static u8 vco_clock_calibrated = 0;
#define VAD_EN_10V_1_(x) SFR(P11_LPVAD->VAD_ACON0, 29, 1, x)
#define VAD_ACM_10V_4_(x) SFR(P11_LPVAD->VAD_ACON0, 0, 4, x)
#define VAD_LDO_IS_10V_2_(x) SFR(P11_LPVAD->VAD_ACON1, 0, 2, x)
#define VAD_LDO_VS_10V_3_(x) SFR(P11_LPVAD->VAD_ACON1, 2, 3, x)
#define VAD_BUF_EN_10V_1_(x) SFR(P11_LPVAD->VAD_ACON0, 17, 1, x)
#define VAD_BUF_IS_10V_2_(x) SFR(P11_LPVAD->VAD_ACON0, 18, 2, x)
#define VAD_PGA_EN_10V_1_(x) SFR(P11_LPVAD->VAD_ACON1, 22, 1, x)
#define VAD_PGA_IS_10V_2_(x) SFR(P11_LPVAD->VAD_ACON1, 27, 2, x)
#define VAD_PGA_GS_10V_4_(x) SFR(P11_LPVAD->VAD_ACON1, 23, 4, x)
#define VAD_BUF2INN_EN_10V_1_(x) SFR(P11_LPVAD->VAD_ACON0, 16, 1, x)
#define VAD_ADC_EN_10V_1_(x) SFR(P11_LPVAD->VAD_ACON0, 4, 1, x)
#define VAD_ADC_RIN_10V_3_(x) SFR(P11_LPVAD->VAD_ACON0, 9, 3, x)
#define VAD_ADC_RBS_10V_3_(x) SFR(P11_LPVAD->VAD_ACON0, 6, 3, x)
#define VAD_ADC_TEN_10V_1_(x) SFR(P11_LPVAD->VAD_ACON0, 12, 1, x)
#define VAD_VCON_TOE_10V_1_(x) SFR(P11_LPVAD->VAD_CON , 27, 1, x)
#define VAD_VCOP_TOE_10V_1_(x) SFR(P11_LPVAD->VAD_CON , 28, 1, x)
#define VAD_IREF1U_TOE_10V_1_(x) SFR(P11_LPVAD->VAD_ACON1, 30, 1, x)
void vco_clock_test_mode_setup(void)
{
P11_LPVAD->VAD_ACON0 = 0;
P11_LPVAD->VAD_ACON1 = 0;
P11_LPVAD->VAD_CON = 0;
SFR(JL_ADDA->ADDA_CON1, 1, 1, 1);//enable vad analog to ANA pad
VAD_EN_10V_1_(1) ;
VAD_ACM_10V_4_(7) ;
VAD_LDO_VS_10V_3_(3) ;
VAD_LDO_IS_10V_2_(2) ;
VAD_BUF_EN_10V_1_(1) ;
VAD_BUF_IS_10V_2_(2) ;
VAD_PGA_EN_10V_1_(1) ;
VAD_PGA_IS_10V_2_(2) ;
VAD_PGA_GS_10V_4_(4) ;
VAD_BUF2INN_EN_10V_1_(1);
VAD_ADC_EN_10V_1_(1) ;
VAD_ADC_RIN_10V_3_(5) ;
VAD_ADC_RBS_10V_3_(5) ;
VAD_VCON_TOE_10V_1_(1) ;
VAD_VCOP_TOE_10V_1_(1) ;
VAD_IREF1U_TOE_10V_1_(1);
}
/*
void vco_clk2io_test(void)
{
//vad_vcon0->PA6
//vad_vcop0->PA7
SFR(JL_CLOCK->CLK_CON0, 24, 4, 15); //clk_out2_sel sel vad_vcon0
SFR(JL_CLOCK->CLK_CON0, 28, 4, 15); //clk_out2_sel sel vad_vcop0
SFR(JL_IOMC->OCH_CON0, 0, 5, 16); //
SFR(JL_IOMC->OCH_CON0, 5, 9, 17);
JL_OMAP->PA6_OUT = (0 << 0) | //input enable
(1 << 1) | //output enable
(0 << 2); //output signal select
JL_OMAP->PA7_OUT = (0 << 0) | //input enable
(1 << 1) | //output enable
(1 << 2); //output signal select
JL_PORTA->DIR &= ~BIT(6);
JL_PORTA->DIR &= ~BIT(7);
printf("============== start VCO_CLK_IO test =================");
}
*/
void delay1us(u16 i)
{
int num = clk_get("sys") / 1000000 * i;
while (num--) {
asm("nop");
}
}
#define GPC_FD_MUX_3_(x) SFR(JL_CLOCK->CLK_CON2,28,4,x)
//osc clk mux 0:btosc24m 1:btosc48m 2:std24m
// 3:rtc_osc 4:lrc_clk 5:pat_clk
#define OSC_CLK_MUX_3_(x) SFR(JL_CLOCK->CLK_CON0,1,3,x)
//0:lsb_clk 1:osc_clk 2:cap_mux_clk 3:clk_mux
//4:gpc_fd(9:vad_vcon 10:vad_vcop) 5:ring_clk
//6:pll_d1p0_mo 7:irflt_in
#define GPCNT_CSS_3_(x) SFR(JL_GPCNT->CON,1,3,x)//count source select
#define GPCNT_GSS_3_(x) SFR(JL_GPCNT->CON,12,3,x)//gate source select
#define GPCNT_GTS_4_(x) SFR(JL_GPCNT->CON,8,4,x)//gate time select 32*2^n
#define GPCNT_EN_1_(x) SFR(JL_GPCNT->CON,0,1,x)
#define CLR_PND_1_(x) SFR(JL_GPCNT->CON,6,1,x)
u32 get_gpcnt(u8 sel_p_n, u8 gs_clk, u8 cs_clk, u8 prd)
{
u32 gpcnt_n = 0;
/*printf("start get cnt\r\n");*/
GPCNT_EN_1_(0);
if (sel_p_n == 0) {
GPC_FD_MUX_3_(9);//9:vad_vcon 10:vad_vcop
} else {
GPC_FD_MUX_3_(10);//9:vad_vcon 10:vad_vcop
}
OSC_CLK_MUX_3_(gs_clk);
GPCNT_CSS_3_(cs_clk);
GPCNT_GSS_3_(1);
GPCNT_GTS_4_(prd);//32*2^n
GPCNT_EN_1_(1);
CLR_PND_1_(1);
delay1us(1000);
asm("csync");
asm("csync");
asm("csync");
while (!(JL_GPCNT->CON >> 7 & 0x01));
asm("csync");
asm("csync");
asm("csync");
gpcnt_n = JL_GPCNT->NUM;
return gpcnt_n;
}
#define FCNT_N 10
u32 get_avr_gpcnt(void)
{
u32 f_cnt = 0, i = 0, fc_num = 0, f_max = 0, f_min = 1000000;
// delay_1000ms();
for (i = 0; i < FCNT_N; i++) {
//osc clk mux 0:btosc24m 1:btosc48m 2:std24m
// 3:rtc_osc 4:lrc_clk 5:pat_clk
// 32*2^(7)=4096
f_cnt = get_gpcnt(1, 2, 4, 7); //32*2^n
//f_cnt = get_gpcnt(1,0,4,7); //32*2^n
//printf("===========get vco clk cnt=%d \r\r\n\n",f_cnt);
fc_num = f_cnt + fc_num;
if (f_max < f_cnt) {
f_max = f_cnt;
}
if (f_min > f_cnt) {
f_min = f_cnt;
}
wdt_clear();
}
//printf("===========get vco f_min clk cnt=%d \r\r\n\n",f_min);
//printf("===========get vco f_max clk cnt=%d \r\r\n\n",f_max);
f_cnt = (fc_num - f_max - f_min) / (FCNT_N - 2);
return f_cnt;
}
#define VCO_TRIM_CNT 8
#define VCO_TRIM_TAR 4949 /*24MHz / 29.0MHz ~ (32*2^7) / VCO_TRIM_TAR: counting 4949 VCO edges in a 4096-cycle gate of the 24MHz reference corresponds to ~29.0MHz*/
void vad_vco_clk_trim(void)
{
u32 gp_cnt[VCO_TRIM_CNT], gp_min, gp_ind;
u8 i;
os_time_dly(80); //500ms delay
for (i = 0; i < VCO_TRIM_CNT; i++) {
VAD_ADC_RIN_10V_3_(i) ;
VAD_ADC_RBS_10V_3_(i) ;
gp_cnt[i] = get_avr_gpcnt();
printf("get vco clk average cnt[%d]=%d\n", i, gp_cnt[i]);
}
for (i = 0; i < VCO_TRIM_CNT; i++) {
if (gp_cnt[i] > VCO_TRIM_TAR) {
gp_cnt[i] = gp_cnt[i] - VCO_TRIM_TAR;
} else {
gp_cnt[i] = VCO_TRIM_TAR - gp_cnt[i];
}
if (i == 0) {
gp_min = gp_cnt[i];
gp_ind = i;
} else if (gp_min > gp_cnt[i]) {
gp_min = gp_cnt[i];
gp_ind = i;
}
wdt_clear();
}
printf("set vco clk index=%d,res=%d\n", gp_ind, gp_min);
/*VAD_ADC_RIN_10V_3_(gp_ind) ;*/
/*VAD_ADC_RBS_10V_3_(gp_ind) ;*/
struct vad_mic_platform_data *data = (struct vad_mic_platform_data *)(VAD_AVAD_CONFIG_BEGIN + sizeof(struct avad_config));
data->mic_data.adc_rin = gp_ind;
data->mic_data.adc_rbs = gp_ind;
}
void audio_vad_clock_trim(void)
{
if (vco_clock_calibrated) {
return;
}
vco_clock_test_mode_setup();
/*vco_clk2io_test();*/
vad_vco_clk_trim();
vco_clock_calibrated = 1;
}

View File

@ -0,0 +1,203 @@
/*****************************************************************
>file name : vad_mic.c
>create time : Fri 15 Apr 2022 10:27:55 AM CST
*****************************************************************/
#include "smart_voice.h"
#include "vad_mic.h"
#include "asm/efuse.h"
#include "voice_mic_data.h"
#include "update/update.h"
struct low_power_vad_mic {
void *priv;
int (*dma_output)(void *, s16 *, int);
};
extern const int config_lp_vad_enable;
struct low_power_vad_mic *lp_vad = NULL;
static DEFINE_SPINLOCK(lp_vad_lock);
//===========================================================================//
// AUDIO_VAD //
//===========================================================================//
#define AUDIO_VAD_CBUF_ADDR VAD_CBUF_BEGIN
static void p11_vad_mic_dma_irq_handler(void)
{
int buffered_frames = P11_LPVAD->DMA_SHN / VOICE_MIC_DATA_PERIOD_FRAMES * VOICE_MIC_DATA_PERIOD_FRAMES;
int buffered_bytes = buffered_frames * VOICE_ADC_SAMPLE_CH * 2;
u8 *read_ptr = (u8 *)((s16 *)AUDIO_VAD_CBUF_ADDR + P11_LPVAD->DMA_SPTR * VOICE_ADC_SAMPLE_CH);
int write_len = 0;
if (P11_LPVAD->DMA_SPTR + buffered_frames > P11_LPVAD->DMA_LEN) {
int read_len = (P11_LPVAD->DMA_LEN - P11_LPVAD->DMA_SPTR) * VOICE_ADC_SAMPLE_CH * 2;
spin_lock(&lp_vad_lock);
if (lp_vad && lp_vad->dma_output) {
lp_vad->dma_output(lp_vad->priv, (s16 *)read_ptr, read_len);
}
spin_unlock(&lp_vad_lock);
read_ptr = (u8 *)AUDIO_VAD_CBUF_ADDR;
buffered_bytes -= read_len;
}
spin_lock(&lp_vad_lock);
if (lp_vad && lp_vad->dma_output) {
lp_vad->dma_output(lp_vad->priv, (s16 *)read_ptr, buffered_bytes);
}
spin_unlock(&lp_vad_lock);
/*Update the P11 LPVAD read pointer; it has to be relayed through P11 and cannot be written to the hardware directly*/
AUDIO_VAD_DMA_READ_UPDATE(buffered_frames / VOICE_MIC_DATA_PERIOD_FRAMES);
}
/*static u8 p2m_active = 0;*/
/*
* Event from IRQ
*
*/
void audio_vad_coprocessor_event_handler(int event)
{
if (!config_lp_vad_enable) {
return;
}
int msg = SMART_VOICE_MSG_STANDBY;
switch (event) {
case P2M_VAD_TRIGGER_START:
/*p2m_active = 1;*/
msg = SMART_VOICE_MSG_WAKE;
p11_vad_mic_dma_irq_handler();
break;
case P2M_VAD_TRIGGER_DMA:
msg = SMART_VOICE_MSG_DMA;
p11_vad_mic_dma_irq_handler();
break;
case P2M_VAD_TRIGGER_STOP:
msg = SMART_VOICE_MSG_STANDBY;
/*p2m_active = 0;*/
break;
default:
break;
}
smart_voice_core_post_msg(1, msg);
}
/*
* Called from the main system's interrupt handler for P11 events
*/
void audio_vad_p2mevent_irq_handler(void)
{
audio_vad_coprocessor_event_handler(P2M_MESSAGE_VAD_CMD);
}
static void lp_vad_mic_in_enable(struct vad_mic_platform_data *data)
{
if (!config_lp_vad_enable) {
return;
}
gpio_set_direction(IO_PORTA_01, 1);
gpio_set_die(IO_PORTA_01, 0);
gpio_set_pull_up(IO_PORTA_01, 0);
gpio_set_pull_down(IO_PORTA_01, 0);
if (data->mic_data.mic_mode == AUDIO_MIC_CAP_DIFF_MODE || data->mic_data.mic_bias_inside) {
gpio_set_direction(IO_PORTA_02, 1);
gpio_set_die(IO_PORTA_02, 0);
gpio_set_pull_up(IO_PORTA_02, 0);
gpio_set_pull_down(IO_PORTA_02, 0);
}
}
int lp_vad_mic_data_init(struct vad_mic_platform_data *mic_data)
{
struct vad_mic_platform_data *data = (struct vad_mic_platform_data *)(VAD_AVAD_CONFIG_BEGIN + sizeof(struct avad_config));
memcpy(data, mic_data, sizeof(struct vad_mic_platform_data));
u8 vbg_trim = get_vad_vbg_trim();
if (vbg_trim != 0xf) {
data->power_data.acm_select = get_vad_vbg_trim();
}
lp_vad_mic_in_enable(mic_data);
return 0;
}
void *lp_vad_mic_open(void *priv, int (*dma_output)(void *priv, s16 *data, int len))
{
if (!config_lp_vad_enable) {
return NULL;
}
if (!lp_vad) {
lp_vad = zalloc(sizeof(struct low_power_vad_mic));
if (!lp_vad) {
return NULL;
}
}
P11_VAD_IRQ_ENABLE();
/*
* P11 VAD initialization
*/
struct avad_config *avad_cfg = (struct avad_config *)VAD_AVAD_CONFIG_BEGIN;
struct dvad_config *dvad_cfg = (struct dvad_config *)VAD_DVAD_CONFIG_BEGIN;
//=================================//
// AVAD tuning parameters //
//=================================//
avad_cfg->avad_quantile_p = 3; //0.8
avad_cfg->avad_gain_db = 10;
avad_cfg->avad_compare_v = 3;
//=================================//
// DVAD tuning parameters //
//=================================//
dvad_cfg->dvad_gain_id = 10;
dvad_cfg->d2a_th_db = 20;
dvad_cfg->d_frame_con = 100;
dvad_cfg->d2a_frame_con = 100;
dvad_cfg->d_stride1 = 3;//<<7
dvad_cfg->d_stride2 = 5;//<<7
dvad_cfg->d_low_con_th = 6;
dvad_cfg->d_high_con_th = 3;
printf("avad_cfg @ 0x%x, dvad_cfg @ 0x%x", (u32)avad_cfg, (u32)dvad_cfg);
lp_vad->priv = priv;
lp_vad->dma_output = dma_output;
audio_vad_m2p_event_post(M2P_VAD_CMD_INIT);
return lp_vad;
}
void lp_vad_mic_disable(void)
{
audio_vad_m2p_event_post(M2P_VAD_CMD_CLOSE);
}
void lp_vad_mic_close(void *vad)
{
if (!config_lp_vad_enable) {
return;
}
if (!vad) {
return;
}
lp_vad_mic_disable();
spin_lock(&lp_vad_lock);
if (vad) {
free(vad);
}
lp_vad = NULL;
spin_unlock(&lp_vad_lock);
}
void lp_vad_mic_test(void)
{
audio_vad_m2p_event_post(M2P_VAD_CMD_TEST);
}
u8 vad_disable(void)
{
lp_vad_mic_disable();
return 0;
}
REGISTER_UPDATE_TARGET(vad_update_target) = {
.name = "vad",
.driver_close = vad_disable,
};

View File

@ -0,0 +1,75 @@
/*****************************************************************
>file name : vad_mic.h
>author : lichao
>create time : Mon 01 Nov 2021 05:07:50 PM CST
*****************************************************************/
#ifndef _P11_VAD_H_
#define _P11_VAD_H_
#include "includes.h"
#include "asm/power/p11.h"
#include "audio_adc.h"
//--------------------- p11 VAD parameter configuration ---------------------------------//
struct avad_config {
int avad_quantile_p;
int avad_th;
int avad_gain_db;
int avad_compare_v;
};
struct dvad_config {
int d_low_con_th;
int d_high_con_th;
int d2a_th_db;
int d2a_frame_con;
int dvad_gain_id;
int d_frame_con;
int d_stride1;
int d_stride2;
};
#define P11_VAD_IRQ_ENABLE() \
P11_SYSTEM->M2P_INT_IE |= BIT(M2P_VAD_INDEX);
//===========================================================================//
// Master to P11 EVENT POST //
//===========================================================================//
enum M2P_VAD_CMD_TABLE {
M2P_VAD_CMD_INIT = 0x55,
M2P_VAD_CMD_FRAME,
M2P_VAD_CMD_CLOSE,
M2P_VAD_CMD_TEST,
};
//===========================================================================//
// P11 VAD EVENT CMD //
//===========================================================================//
enum P2M_VAD_CMD_TABLE {
P2M_VAD_TRIGGER_START = 0xAA,
P2M_VAD_TRIGGER_DMA,
P2M_VAD_TRIGGER_STOP,
};
static inline void audio_vad_m2p_event_post(enum M2P_VAD_CMD_TABLE cmd)
{
M2P_MESSAGE_VAD_CMD = cmd;
P11_M2P_INT_SET = BIT(M2P_VAD_INDEX);
}
#define AUDIO_VAD_DMA_READ_UPDATE(n) \
M2P_MESSAGE_VAD_CBUF_RPTR = (n); \
audio_vad_m2p_event_post(M2P_VAD_CMD_FRAME);
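/*
* Handshake example (as used by vad_mic.c): the master posts M2P commands with
* audio_vad_m2p_event_post() (e.g. M2P_VAD_CMD_INIT from lp_vad_mic_open()); the P11
* coprocessor answers with P2M_VAD_TRIGGER_* events that reach
* audio_vad_coprocessor_event_handler(), and on P2M_VAD_TRIGGER_DMA the handler drains
* the shared DMA buffer and acknowledges the consumed periods with
* AUDIO_VAD_DMA_READ_UPDATE().
*/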
void audio_vad_clock_trim(void);
int lp_vad_mic_data_init(struct vad_mic_platform_data *mic_data);
void *lp_vad_mic_open(void *priv, int (*dma_output)(void *priv, s16 *data, int len));
void lp_vad_mic_close(void *vad);
void lp_vad_mic_test(void);
void lp_vad_mic_disable(void);
#endif

View File

@ -0,0 +1,395 @@
/*****************************************************************
>file name : voice_mic_data.c
>author : lichao
>create time : Mon 01 Nov 2021 11:33:32 AM CST
*****************************************************************/
#include "app_config.h"
#include "voice_mic_data.h"
#include "smart_voice.h"
#include "vad_mic.h"
#include "app_main.h"
#include "audio_anc.h"
#if TCFG_AUDIO_ANC_ENABLE
#include "audio_anc.h"
#endif/*TCFG_AUDIO_ANC_ENABLE*/
extern struct audio_adc_hdl adc_hdl;
#define CONFIG_VOICE_MIC_DATA_DUMP 0
#define CONFIG_VOICE_MIC_DATA_EXPORT 0
#if CONFIG_VOICE_MIC_DATA_EXPORT
#include "aec_uart_debug.h"
#endif
#define MAIN_ADC_GAIN app_var.aec_mic_gain
#if TCFG_AUDIO_TRIPLE_MIC_ENABLE
#define MAIN_ADC_CH_NUM 3
#elif TCFG_AUDIO_DUAL_MIC_ENABLE
#define MAIN_ADC_CH_NUM 2
#else
#define MAIN_ADC_CH_NUM 1
#endif
extern s16 esco_adc_buf[];
struct main_adc_context {
struct audio_adc_output_hdl dma_output;
struct adc_mic_ch mic_ch;
/*s16 dma_buf[VOICE_MIC_DATA_PERIOD_FRAMES * 2 * MAIN_ADC_CH_NUM];*/
s16 *dma_buf;
#if (MAIN_ADC_CH_NUM > 1)
s16 mic0_sample_data[VOICE_MIC_DATA_PERIOD_FRAMES];
#endif /*MAIN_ADC_CH_NUM*/
};
/*
* Mic data receive buffer (circular buffer, dynamically sized)
*/
struct voice_mic_data {
u8 open;
u8 source;
struct main_adc_context *main_adc;
void *vad_mic;
struct list_head head;
cbuffer_t cbuf;
u8 buf[0];
};
struct voice_mic_capture_channel {
void *priv;
void (*output)(void *priv, s16 *data, int len);
struct list_head entry;
};
static struct voice_mic_data *voice_handle = NULL;
#define __this (voice_handle)
#if CONFIG_VOICE_MIC_DATA_DUMP
static u8 mic_data_dump = 0;
#endif
static int voice_mic_data_output(void *priv, s16 *data, int len)
{
struct voice_mic_data *voice = (struct voice_mic_data *)priv;
struct voice_mic_capture_channel *ch;
list_for_each_entry(ch, &voice->head, entry) {
if (ch->output) {
ch->output(ch->priv, data, len);
}
}
int wlen = cbuf_write(&voice->cbuf, data, len);
if (wlen < len) {
putchar('D');
}
return wlen;
}
static void audio_main_adc_dma_data_handler(void *priv, s16 *data, int len)
{
struct voice_mic_data *voice = (struct voice_mic_data *)priv;
if (!voice || voice->source != VOICE_MCU_MIC) {
return;
}
s16 *pcm_data = data;
#if (MAIN_ADC_CH_NUM > 1)
pcm_data = voice->main_adc->mic0_sample_data;
int frames = len >> 1;
int i = 0;
for (i = 0; i < frames; i++) {
pcm_data[i] = data[i * MAIN_ADC_CH_NUM];
}
#endif
voice_mic_data_output(voice, pcm_data, len);
smart_voice_core_post_msg(1, SMART_VOICE_MSG_DMA);
}
#if TCFG_CALL_KWS_SWITCH_ENABLE
static void audio_main_adc_mic_close(struct voice_mic_data *voice, u8 all_channel);
static void audio_main_adc_suspend_handler(int all_channel, int arg)
{
OS_SEM *sem = (OS_SEM *)arg;
if (__this) {
audio_main_adc_mic_close(__this, all_channel);
}
os_sem_post(sem);
}
static void audio_main_adc_suspend_in_core_task(u8 all_channel)
{
if (!__this || !__this->main_adc) {
return;
}
int argv[5];
OS_SEM *sem = malloc(sizeof(OS_SEM));
os_sem_create(sem, 0);
argv[0] = (int)audio_main_adc_suspend_handler;
argv[1] = 2;
argv[2] = all_channel;
argv[3] = (int)sem;
do {
int err = os_taskq_post_type(ASR_CORE, Q_CALLBACK, 4, argv);
if (err == OS_ERR_NONE) {
break;
}
if (err != OS_Q_FULL) {
audio_main_adc_suspend_handler(all_channel, (int)sem);
goto exit;
}
os_time_dly(2);
} while (1);
os_sem_pend(sem, 100);
exit:
free(sem);
}
void kws_aec_data_output(void *priv, s16 *data, int len)
{
if (!__this || __this->source != VOICE_MCU_MIC) {
return;
}
audio_main_adc_suspend_in_core_task(0);
voice_mic_data_output(__this, data, len);
smart_voice_core_post_msg(1, SMART_VOICE_MSG_DMA);
}
u8 kws_get_state(void)
{
if (!__this || __this->source != VOICE_MCU_MIC) {
return 0;
}
    //During AEC initialization, query whether KWS mode has been entered; the mic opened by KWS itself may need to be closed at this point
audio_main_adc_suspend_in_core_task(1);
return 1;
}
#endif
/* Open ADC mic channel `ch` only when the corresponding bit is set in ch_map. */
#define audio_main_adc_mic_ch_setup(ch, mic_ch, ch_map, adc_handle) \
    do { \
        if (ch_map & BIT(ch)) { \
            audio_adc_mic##ch##_open(mic_ch, ch_map, adc_handle); \
        } \
    } while (0)
static int audio_main_adc_mic_open(struct voice_mic_data *voice)
{
if (!voice->main_adc) {
voice->main_adc = zalloc(sizeof(struct main_adc_context));
}
if (!voice->main_adc) {
return -ENOMEM;
}
#if TCFG_AUDIO_ANC_ENABLE && (!TCFG_AUDIO_DYNAMIC_ADC_GAIN)
MAIN_ADC_GAIN = audio_anc_ffmic_gain_get();
#elif TCFG_AUDIO_ANC_ENABLE && TCFG_AUDIO_DYNAMIC_ADC_GAIN
anc_dynamic_micgain_start(MAIN_ADC_GAIN);
#endif/*TCFG_AUDIO_ANC_ENABLE && (!TCFG_AUDIO_DYNAMIC_ADC_GAIN)*/
voice->main_adc->dma_buf = esco_adc_buf;
audio_adc_mic_open(&voice->main_adc->mic_ch, AUDIO_ADC_MIC_0, &adc_hdl);
audio_adc_mic_set_gain(&voice->main_adc->mic_ch, MAIN_ADC_GAIN);
#ifdef TCFG_AUDIO_ADC_MIC_CHA
audio_main_adc_mic_ch_setup(1, &voice->main_adc->mic_ch, TCFG_AUDIO_ADC_MIC_CHA, &adc_hdl);
audio_main_adc_mic_ch_setup(2, &voice->main_adc->mic_ch, TCFG_AUDIO_ADC_MIC_CHA, &adc_hdl);
audio_main_adc_mic_ch_setup(3, &voice->main_adc->mic_ch, TCFG_AUDIO_ADC_MIC_CHA, &adc_hdl);
#endif
audio_adc_mic_set_sample_rate(&voice->main_adc->mic_ch, 16000);
audio_adc_mic_set_buffs(&voice->main_adc->mic_ch, voice->main_adc->dma_buf,
VOICE_MIC_DATA_PERIOD_FRAMES * 2, 2);
voice->main_adc->dma_output.priv = voice;
voice->main_adc->dma_output.handler = audio_main_adc_dma_data_handler;
audio_adc_add_output_handler(&adc_hdl, &voice->main_adc->dma_output);
audio_adc_mic_start(&voice->main_adc->mic_ch);
return 0;
}
static void audio_main_adc_mic_close(struct voice_mic_data *voice, u8 all_channel)
{
if (voice->main_adc) {
#if TCFG_AUDIO_ANC_ENABLE && TCFG_AUDIO_DYNAMIC_ADC_GAIN
anc_dynamic_micgain_stop();
#endif/*TCFG_AUDIO_ANC_ENABLE && TCFG_AUDIO_DYNAMIC_ADC_GAIN*/
if (all_channel) {
audio_adc_mic_close(&voice->main_adc->mic_ch);
}
audio_adc_del_output_handler(&adc_hdl, &voice->main_adc->dma_output);
free(voice->main_adc);
voice->main_adc = NULL;
}
}
void *voice_mic_data_open(u8 source, int buffer_size, int sample_rate)
{
if (!__this) {
__this = zalloc(sizeof(struct voice_mic_data) + buffer_size);
}
if (!__this) {
return NULL;
}
if (__this->open) {
return __this;
}
cbuf_init(&__this->cbuf, __this->buf, buffer_size);
__this->source = source;
INIT_LIST_HEAD(&__this->head);
#if CONFIG_VOICE_MIC_DATA_EXPORT
aec_uart_open(1, VOICE_MIC_DATA_PERIOD_FRAMES * 2);
#endif
if (source == VOICE_VAD_MIC) {
__this->vad_mic = lp_vad_mic_open((void *)__this, voice_mic_data_output);
} else if (source == VOICE_MCU_MIC) {
audio_main_adc_mic_open(__this);
smart_voice_core_post_msg(1, SMART_VOICE_MSG_WAKE);
}
__this->open = 1;
return __this;
}
void voice_mic_data_close(void *mic)
{
    struct voice_mic_data *voice = (struct voice_mic_data *)mic;
    if (!voice) {
        return;
    }
    if (voice->source == VOICE_VAD_MIC) {
        lp_vad_mic_close(voice->vad_mic);
        voice->vad_mic = NULL;
    } else if (voice->source == VOICE_MCU_MIC) {
        audio_main_adc_mic_close(voice, 1);
        smart_voice_core_post_msg(1, SMART_VOICE_MSG_STANDBY);
    }
#if CONFIG_VOICE_MIC_DATA_EXPORT
    aec_uart_close();
#endif
    free(voice);
    __this = NULL;
}
void voice_mic_data_switch_source(void *mic, u8 source, int buffer_size, int sample_rate)
{
struct voice_mic_data *voice = (struct voice_mic_data *)mic;
if (voice->source == source) {
return;
}
voice->source = source;
if (voice->source == VOICE_VAD_MIC) {
audio_main_adc_mic_close(voice, 1);
voice->vad_mic = lp_vad_mic_open(voice, voice_mic_data_output);
} else if (voice->source == VOICE_MCU_MIC) {
lp_vad_mic_close(voice->vad_mic);
voice->vad_mic = NULL;
audio_main_adc_mic_open(voice);
smart_voice_core_post_msg(1, SMART_VOICE_MSG_WAKE);
}
}
void *voice_mic_data_capture(int sample_rate, void *priv, void (*output)(void *priv, s16 *data, int len))
{
struct voice_mic_capture_channel *ch = (struct voice_mic_capture_channel *)zalloc(sizeof(struct voice_mic_capture_channel));
if (!ch) {
return NULL;
}
voice_mic_data_open(VOICE_VAD_MIC, 2048, sample_rate);
if (!__this) {
free(ch);
return NULL;
}
ch->priv = priv;
ch->output = output;
list_add(&ch->entry, &__this->head);
lp_vad_mic_test();
return ch;
}
void voice_mic_data_stop_capture(void *mic)
{
    struct voice_mic_capture_channel *ch = (struct voice_mic_capture_channel *)mic;
    if (ch) {
        list_del(&ch->entry);
        free(ch);
    }
    if (__this && list_empty(&__this->head)) {
        lp_vad_mic_test();
    }
}
int voice_mic_data_read(void *mic, void *data, int len)
{
struct voice_mic_data *fb = (struct voice_mic_data *)mic;
int wlen = 0;
if (cbuf_get_data_len(&fb->cbuf) < len) {
return 0;
} else {
wlen = cbuf_read(&fb->cbuf, data, len);
#if CONFIG_VOICE_MIC_DATA_EXPORT
aec_uart_fill(0, data, len);
aec_uart_write();
#endif
return wlen;
}
}
int voice_mic_data_buffered_samples(void *mic)
{
struct voice_mic_data *fb = (struct voice_mic_data *)mic;
return cbuf_get_data_len(&fb->cbuf) >> 1;
}
void voice_mic_data_clear(void *mic)
{
struct voice_mic_data *fb = (struct voice_mic_data *)mic;
cbuf_clear(&fb->cbuf);
}
void voice_mic_data_dump(void *mic)
{
struct voice_mic_data *fb = (struct voice_mic_data *)mic;
#if CONFIG_VOICE_MIC_DATA_DUMP
mic_data_dump = 1;
int len = 0;
int i = 0;
s16 *data = (s16 *)cbuf_read_alloc(&fb->cbuf, &len);
len >>= 1;
if (data) {
#if 0
for (i = 0; i < len; i++) {
if ((i % 3000) == 0) {
wdt_clear();
}
printf("%d\n", data[i]);
}
#else
put_buf(data, len << 1);
#endif
}
cbuf_read_updata(&fb->cbuf, len << 1);
mic_data_dump = 0;
#endif
}
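
The capture-channel path above (voice_mic_data_capture()/voice_mic_data_stop_capture()) can be exercised with a sketch like the following. The demo_capture_sink() callback and the start/stop wrappers are illustrative; the sink is assumed to be invoked from the ADC/VAD data path, so it should only copy or queue the PCM rather than block.

static void demo_capture_sink(void *priv, s16 *data, int len)
{
    (void)priv;
    /* Copy or queue the 16 kHz mono PCM here; avoid blocking in this callback. */
    (void)data;
    (void)len;
}

static void *demo_capture_ch;

static void demo_capture_start(void)
{
    demo_capture_ch = voice_mic_data_capture(16000, NULL, demo_capture_sink);
}

static void demo_capture_stop(void)
{
    if (demo_capture_ch) {
        voice_mic_data_stop_capture(demo_capture_ch);
        demo_capture_ch = NULL;
    }
}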

View File

@ -0,0 +1,39 @@
/*****************************************************************
>file name : voice_mic_data.h
>author : lichao
>create time : Mon 01 Nov 2021 05:08:11 PM CST
*****************************************************************/
#ifndef _VOICE_MIC_DATA_H_
#define _VOICE_MIC_DATA_H_
#include "asm/audio_adc.h"
#define VOICE_VAD_MIC 0
#define VOICE_MCU_MIC 1
#define VOICE_DEFULT_MIC VOICE_VAD_MIC
#define VOICE_ADC_SAMPLE_RATE 16000
#define VOICE_ADC_SAMPLE_CH 1
#define VOICE_MIC_DATA_SAMPLE_PREIOD 10
#define VOICE_MIC_DATA_PERIOD_FRAMES (VOICE_MIC_DATA_SAMPLE_PREIOD * VOICE_ADC_SAMPLE_RATE / 1000)
void *voice_mic_data_open(u8 source, int buffer_size, int sample_rate);
void voice_mic_data_close(void *mic);
int voice_mic_data_read(void *mic, void *data, int len);
int voice_mic_data_buffered_samples(void *mic);
void voice_mic_data_clear(void *mic);
void voice_mic_data_dump(void *mic);
void *voice_mic_data_capture(int sample_rate, void *priv, void (*output)(void *priv, s16 *data, int len));
void voice_mic_data_stop_capture(void *mic);
void voice_mic_data_switch_source(void *mic, u8 source, int buffer_size, int sample_rate);
#endif
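
As a usage sketch of this header, a consumer might poll the buffer and pull whole 10 ms frames. asr_feed() is a placeholder for whatever processes the PCM, and the 2048-byte buffer size mirrors the value used by voice_mic_data_capture() above.

#include "voice_mic_data.h"

extern void asr_feed(s16 *pcm, int samples);   /* hypothetical PCM consumer */

static void voice_mic_consumer_demo(void)
{
    s16 frame[VOICE_MIC_DATA_PERIOD_FRAMES];
    void *mic = voice_mic_data_open(VOICE_DEFULT_MIC, 2048, VOICE_ADC_SAMPLE_RATE);
    if (!mic) {
        return;
    }
    /* voice_mic_data_read() returns 0 until a full `len` bytes are buffered. */
    while (voice_mic_data_buffered_samples(mic) >= VOICE_MIC_DATA_PERIOD_FRAMES) {
        if (voice_mic_data_read(mic, frame, sizeof(frame)) == sizeof(frame)) {
            asr_feed(frame, VOICE_MIC_DATA_PERIOD_FRAMES);
        }
    }
    voice_mic_data_close(mic);
}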