【Embedded AI Challenge Camp, Advanced】Face Detection and Recognition DEMO
I. Background
I have long been interested in edge-side AI, especially face recognition. I ended up buying quite a few development boards: an RV1106, an RV1126 with a 5-inch screen, and an official Rockchip RV1106G2 board. But my ambition outran my effort, and once the development environments were set up the boards mostly sat gathering dust. This forum activity is a good chance to get moving again. Thanks to the EEWorld forum, and thanks to Luckfox.
II. Development Process
This activity asks for multi-person face recognition based on InsightFace. Porting the Python version to the RV1106 and getting it to run is not realistic, so I went with the C++ version. Like other forum members, I fixed a few compile errors and used the opencv-mobile library, and the build did go through. However, the officially provided model only supports the RV1126; setting up an environment to train a model myself and then converting it to RKNN for deployment on the board would take who knows how long, and frankly I don't know how to do that yet.
To get a working result, I modified the Luckfox demo to do face detection and face recognition plus RTSP streaming. The Luckfox demo uses two algorithms, RetinaFace for detection and FaceNet for recognition; InsightFace essentially bundles this kind of detection and recognition algorithm together as well.
Luckfox's official documentation describes them in detail. The rough flow of this implementation is as follows:
1. Download the source code
Path to Luckfox's official example source code:
2. Piece the code together
The demo in the rknn_example repository implements face detection and recognition, but shows the result on the LCD. The demo in the rkmpi_example repository implements face detection plus RTSP streaming.
So I combined two projects: "luckfox_pico_retinaface_facenet" from the rknn_example repository and "luckfox_pico_rtsp_retinaface_osd" from the rkmpi_example repository.
3. Add a reference-face registration feature and label the person's name when a face is recognized.
The code is as follows:
main.c
/*-------------------------------------------
Includes
-------------------------------------------*/
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <unistd.h>
#include <sys/types.h>
#include <sys/stat.h>
#include <fcntl.h>
#include <sys/ioctl.h>
#include <sys/mman.h>
#include <pthread.h>
#include "retinaface_facenet.h"
#include <time.h>
#include <sys/time.h>
#include "rtsp_demo.h"
#include "luckfox_mpi.h"
#include <iostream>
#include <map>
#include <string>
void face_register(std::map<std::string, float *> &face_map, const std::string &image_path, rknn_app_context_t *app_facenet_ctx);
#define DISP_WIDTH 720
#define DISP_HEIGHT 480
MB_POOL src_Pool;
MB_BLK src_Blk;
unsigned char *src_data;
VIDEO_FRAME_INFO_S h264_frame;
RK_U32 H264_TimeRef = 0;
VENC_STREAM_S stFrame;
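// Set up Rockchip MPI: create a DMA buffer pool holding one DISP_WIDTH x DISP_HEIGHT RGB888 frame and open H.264 encoder channel 0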
static int rkmpi_init(void)
{
// rkmpi init
if (RK_MPI_SYS_Init() != RK_SUCCESS)
{
RK_LOGE("rk mpi sys init fail!");
return -1;
}
// h264_frame
stFrame.pstPack = (VENC_PACK_S *)malloc(sizeof(VENC_PACK_S));
// Create Pool
MB_POOL_CONFIG_S PoolCfg;
memset(&PoolCfg, 0, sizeof(MB_POOL_CONFIG_S));
PoolCfg.u64MBSize = DISP_WIDTH * DISP_HEIGHT * 3;
PoolCfg.u32MBCnt = 1;
PoolCfg.enAllocType = MB_ALLOC_TYPE_DMA;
// PoolCfg.bPreAlloc = RK_FALSE;
src_Pool = RK_MPI_MB_CreatePool(&PoolCfg);
printf("Create Pool success !\n");
// Get MB from Pool
src_Blk = RK_MPI_MB_GetMB(src_Pool, DISP_WIDTH * DISP_HEIGHT * 3, RK_TRUE);
// Build h264_frame
h264_frame.stVFrame.u32Width = DISP_WIDTH;
h264_frame.stVFrame.u32Height = DISP_HEIGHT;
h264_frame.stVFrame.u32VirWidth = DISP_WIDTH;
h264_frame.stVFrame.u32VirHeight = DISP_HEIGHT;
h264_frame.stVFrame.enPixelFormat = RK_FMT_RGB888;
h264_frame.stVFrame.u32FrameFlag = 160;
h264_frame.stVFrame.pMbBlk = src_Blk;
src_data = (unsigned char *)RK_MPI_MB_Handle2VirAddr(src_Blk);
// venc init
RK_CODEC_ID_E enCodecType = RK_VIDEO_ID_AVC;
venc_init(0, DISP_WIDTH, DISP_HEIGHT, enCodecType);
return 0;
}
rtsp_demo_handle g_rtsplive = NULL;
rtsp_session_handle g_rtsp_session;
static void rtsp_init(void)
{
// rtsp init
g_rtsplive = create_rtsp_demo(554);
g_rtsp_session = rtsp_new_session(g_rtsplive, "/live/0");
rtsp_set_video(g_rtsp_session, RTSP_CODEC_ID_VIDEO_H264, NULL, 0);
rtsp_sync_video_ts(g_rtsp_session, rtsp_get_reltime(), rtsp_get_ntptime());
}
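// Worker thread: keep pulling encoded H.264 packets from VENC channel 0 and pushing them to the RTSP session (rtsp://<board IP>:554/live/0)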
static void *GetMediaBuffer(void *arg)
{
(void)arg;
printf("========%s========\n", __func__);
void *pData = RK_NULL;
int s32Ret;
VENC_STREAM_S stFrame;
stFrame.pstPack = (VENC_PACK_S *)malloc(sizeof(VENC_PACK_S));
while (1)
{
s32Ret = RK_MPI_VENC_GetStream(0, &stFrame, -1);
if (s32Ret == RK_SUCCESS)
{
if (g_rtsplive && g_rtsp_session)
{
pData = RK_MPI_MB_Handle2VirAddr(stFrame.pstPack->pMbBlk);
rtsp_tx_video(g_rtsp_session, (uint8_t *)pData, stFrame.pstPack->u32Len,
stFrame.pstPack->u64PTS);
rtsp_do_event(g_rtsplive);
}
s32Ret = RK_MPI_VENC_ReleaseStream(0, &stFrame);
if (s32Ret != RK_SUCCESS)
{
RK_LOGE("RK_MPI_VENC_ReleaseStream fail %x", s32Ret);
}
}
usleep(10 * 1000);
}
printf("\n======exit %s=======\n", __func__);
free(stFrame.pstPack);
return NULL;
}
/*-------------------------------------------
Main Function
-------------------------------------------*/
int main(int argc, char **argv)
{
system("RkLunch-stop.sh");
const char *model_path = "./model/RetinaFace.rknn";
const char *model_path2 = "./model/mobilefacenet.rknn";
const char *image_path = "./model";
// Model Input
// Retinaface
int retina_width = 640;
int retina_height = 640;
// Facenet
int facenet_width = 160;
int facenet_height = 160;
int channels = 3;
int disp_width = DISP_WIDTH;
int disp_height = DISP_HEIGHT;
int ret;
rknn_app_context_t app_retinaface_ctx;
rknn_app_context_t app_facenet_ctx;
object_detect_result_list od_results;
memset(&app_retinaface_ctx, 0, sizeof(rknn_app_context_t));
memset(&app_facenet_ctx, 0, sizeof(rknn_app_context_t));
// Init Model
init_retinaface_facenet_model(model_path, model_path2, &app_retinaface_ctx, &app_facenet_ctx);
// Init Opencv-mobile
cv::VideoCapture cap;
cv::Mat bgr(disp_height, disp_width, CV_8UC3);
cv::Mat retina_input(retina_height, retina_width, CV_8UC3, app_retinaface_ctx.input_mems[0]->virt_addr);
cap.set(cv::CAP_PROP_FRAME_WIDTH, disp_width);
cap.set(cv::CAP_PROP_FRAME_HEIGHT, disp_height);
cap.open(0);
char show_text[128];
rkmpi_init();
rtsp_init();
pthread_t main_thread;
pthread_create(&main_thread, NULL, GetMediaBuffer, NULL);
printf("init success\n");
cv::Mat frame(cv::Size(DISP_WIDTH, DISP_HEIGHT), CV_8UC3, src_data);
cv::Mat facenet_input(facenet_width, facenet_height, CV_8UC3, app_facenet_ctx.input_mems[0]->virt_addr);
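// Register reference faces: every image under ./model is run through FaceNet once; the map key is the file name without extension, the value is the 128-d embedding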
std::map<std::string, float *> face_map;
face_register(face_map, image_path, &app_facenet_ctx);
for (const auto &pair : face_map)
{
std::cout << "name: " << pair.first << " registered" << std::endl;
}
float out_fp32[128];
while (1)
{
h264_frame.stVFrame.u32TimeRef = H264_TimeRef++;
h264_frame.stVFrame.u64PTS = TEST_COMM_GetNowUs();
// opencv get photo
cap >> bgr;
#if 1
cv::resize(bgr, retina_input, cv::Size(retina_width, retina_height), 0, 0, cv::INTER_LINEAR);
ret = inference_retinaface_model(&app_retinaface_ctx, &od_results);
if (ret != 0)
{
printf("init_retinaface_model fail! ret=%d\n", ret);
return -1;
}
for (int i = 0; i < od_results.count; i++)
{
// Get det
object_detect_result *det_result = &(od_results.results[i]);
mapCoordinates(bgr, retina_input, &det_result->box.left, &det_result->box.top);
mapCoordinates(bgr, retina_input, &det_result->box.right, &det_result->box.bottom);
cv::rectangle(bgr, cv::Point(det_result->box.left, det_result->box.top),
cv::Point(det_result->box.right, det_result->box.bottom), cv::Scalar(0, 255, 0), 3);
// Face capture
cv::Rect roi(det_result->box.left, det_result->box.top,
(det_result->box.right - det_result->box.left),
(det_result->box.bottom - det_result->box.top));
cv::Mat face_img = bgr(roi);
// Give five key points
// for(int j = 0; j < 5;j ++)
// {
// //printf("point_x = %d point_y = %d\n",det_result->point[j].x,
// // det_result->point[j].y);
// cv::circle(bgr,cv::Point(det_result->point[j].x,det_result->point[j].y),10,cv::Scalar(0,255,0),3);
// }
letterbox(face_img, facenet_input);
ret = rknn_run(app_facenet_ctx.rknn_ctx, nullptr);
if (ret < 0)
{
printf("rknn_run fail! ret=%d\n", ret);
return -1;
}
output_normalization(&app_facenet_ctx, (uint8_t *)(app_facenet_ctx.output_mems[0]->virt_addr), out_fp32);
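// Compare the live embedding against every registered face; any entry with Euclidean distance below 1.0 gets its name drawn above the box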
for (const auto &pair : face_map)
{
float norm = get_duclidean_distance(pair.second, out_fp32);
if (norm < 1.0)
{
snprintf(show_text, sizeof(show_text), "%s=%.2f", pair.first.c_str(), norm);
cv::putText(bgr, show_text, cv::Point(det_result->box.left, det_result->box.top - 8),
cv::FONT_HERSHEY_SIMPLEX, 0.5,
cv::Scalar(0, 255, 0),
1);
}
}
}
#endif
cv::cvtColor(bgr, frame, cv::COLOR_BGR2RGB);
// send stream
// encode H264
RK_MPI_VENC_SendFrame(0, &h264_frame, -1);
}
RK_MPI_VENC_StopRecvFrame(0);
RK_MPI_VENC_DestroyChn(0);
// Destory MB
RK_MPI_MB_ReleaseMB(src_Blk);
// Destory Pool
RK_MPI_MB_DestroyPool(src_Pool);
free(stFrame.pstPack);
release_facenet_model(&app_facenet_ctx);
release_retinaface_model(&app_retinaface_ctx);
pthread_join(main_thread, NULL);
return 0;
}
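One detail worth noting: the matching loop above draws a label for every registered face whose distance is below 1.0, so two similar registrations can end up drawn on top of each other. Below is a minimal sketch of a "closest match only" variant, using the same face_map and get_duclidean_distance() as in the listing; the helper name find_best_match is my own and is not part of the Luckfox demo.
// Sketch (my own variant, not from the Luckfox demo): return only the closest
// registered face. face_map holds name -> 128-d embedding, as in main.c above.
static bool find_best_match(std::map<std::string, float *> &face_map,
                            float *live_embedding,
                            std::string &best_name, float &best_dist)
{
    best_dist = 1.0f;          // reuse the demo's 1.0 distance threshold
    best_name.clear();
    for (auto &pair : face_map)
    {
        float d = get_duclidean_distance(pair.second, live_embedding);
        if (d < best_dist)     // keep the smallest distance seen so far
        {
            best_dist = d;
            best_name = pair.first;
        }
    }
    return !best_name.empty(); // false means no registration was close enough
}
Calling find_best_match(face_map, out_fp32, name, dist) in place of the inner for loop, and drawing the label only when it returns true, keeps a single name per detected face.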
Face registration code:
#include <dirent.h>
#include <iostream>
#include <map>
#include <string>
#include <sys/types.h>
#include "retinaface_facenet.h"
bool is_image_file(const std::string &filename)
{
const std::string extensions[] = {".jpg", ".jpeg", ".png", ".bmp", ".gif"};
for (const auto &ext : extensions)
{
if (filename.size() >= ext.size() &&
filename.compare(filename.size() - ext.size(), ext.size(), ext) == 0)
{
return true;
}
}
return false;
}
void face_register(std::map<std::string, float *> &face_map, const std::string &image_path, rknn_app_context_t *app_facenet_ctx)
{
DIR *dir = opendir(image_path.c_str());
if (dir == nullptr)
{
perror("opendir");
return;
}
std::cout << "image_path: " << image_path << std::endl;
struct dirent *entry;
while ((entry = readdir(dir)) != nullptr)
{
std::string filename = entry->d_name;
if (entry->d_type == DT_REG && is_image_file(filename))
{
std::string filepath = image_path + "/" + filename;
std::cout << "filepath: " << filepath << std::endl;
cv::Mat image = cv::imread(filepath);
if (image.empty())
{
printf("failed to read %s\n", filepath.c_str());
continue;
}
cv::Mat facenet_input_ref(160, 160, CV_8UC3, app_facenet_ctx->input_mems[0]->virt_addr);
letterbox(image, facenet_input_ref);
int ret = rknn_run(app_facenet_ctx->rknn_ctx, nullptr);
if (ret < 0)
{
printf("rknn_run fail! ret=%d\n", ret);
return;
}
float *out_fp32_ref = new float[128];
output_normalization(app_facenet_ctx, (uint8_t *)(app_facenet_ctx->output_mems[0]->virt_addr), out_fp32_ref);
// Strip the file name extension and use the base name as the person's name
size_t last_dot = filename.find_last_of(".");
std::string filename_without_extension = (last_dot == std::string::npos) ? filename : filename.substr(0, last_dot);
face_map[filename_without_extension] = out_fp32_ref;
}
}
closedir(dir);
}
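Usage note: to register someone, drop one clear frontal photo per person into ./model (the image_path passed to face_register()); the file name without its extension becomes the on-screen label, so for example a file named zhangsan.jpg would be labelled "zhangsan". The 128-float embedding buffers allocated with new float[128] are kept in face_map for the life of the process.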
III. Final Result
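With the demo running on the board, the stream can be opened from a PC with any RTSP-capable player (for example VLC or ffplay) at rtsp://<board IP>/live/0, on port 554 as configured in rtsp_init(). Detected faces are drawn as green rectangles, and for registered faces the name and the Euclidean distance are overlaid above the box.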