Embedded Engineer AI Challenge Camp: RV1106 Face Recognition + Build Log (7): PC Inference

Continuing from the previous post: the Y-axis coordinate offset has been tracked down. The problem was in how the input image size is handled.

The model input size is 640x640, so the image has to be scaled before it is fed to the model for inference, and the detection results then have to be scaled back up to the original image dimensions.
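The key to the fix is how model-space coordinates map back to the original image: with insightface-style top-left padding a single division by the scale factor is enough, while a centered letterbox also has to subtract the padding offsets first. A minimal sketch of both mappings (the function names are illustrative, not part of the code below):

def unscale_topleft(box, det_scale):
    # Top-left padding: model-space coordinates map back with one scale factor
    return box / det_scale

def unscale_letterbox(box, aspect_ratio, offset_x, offset_y):
    # Centered letterbox: remove the pad offsets first, then undo the scale
    box = box.copy()
    box[0::2] = (box[0::2] - offset_x) / aspect_ratio  # x coordinates
    box[1::2] = (box[1::2] - offset_y) / aspect_ratio  # y coordinates
    return box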

# PC-side inference script: convert det_10g.onnx with RKNN-Toolkit2 and run face detection
import os
import sys
import urllib
import urllib.request
import time
import numpy as np
import cv2

import insightface
from insightface.app import FaceAnalysis
from common import Face
from image import get_image as ins_get_image

from math import ceil
from itertools import product as product

from rknn.api import RKNN
DATASET_PATH = './dataset.txt'
DEFAULT_QUANT = True

def letterbox_resize(image, size, bg_color):
    """
    letterbox_resize the image according to the specified size
    :param image: input image, which can be a NumPy array or file path
    :param size: target size (width, height)
    :param bg_color: background filling data 
    :return: processed image
    """
    if isinstance(image, str):
        image = cv2.imread(image)

    target_width, target_height = size
    image_height, image_width, _ = image.shape

    # Compute the scaled image size
    aspect_ratio = min(target_width / image_width, target_height / image_height)
    new_width = int(image_width * aspect_ratio)
    new_height = int(image_height * aspect_ratio)

    # Proportional resize with cv2.resize()
    image = cv2.resize(image, (new_width, new_height), interpolation=cv2.INTER_AREA)

    # Create a new canvas and pad with the background color
    result_image = np.ones((target_height, target_width, 3), dtype=np.uint8) * bg_color
    offset_x = (target_width - new_width) // 2
    offset_y = (target_height - new_height) // 2
    result_image[offset_y:offset_y + new_height, offset_x:offset_x + new_width] = image
    return result_image, aspect_ratio, offset_x, offset_y

def PriorBox(image_size):  # image_size supports (320, 320) and (640, 640)
    anchors = []
    min_sizes = [[16, 32], [64, 128], [256, 512]]
    steps = [8, 16, 32]
    feature_maps = [[ceil(image_size[0] / step), ceil(image_size[1] / step)] for step in steps]
    for k, f in enumerate(feature_maps):
        min_sizes_ = min_sizes[k]
        for i, j in product(range(f[0]), range(f[1])):
            for min_size in min_sizes_:
                s_kx = min_size / image_size[1]
                s_ky = min_size / image_size[0]
                dense_cx = [x * steps[k] / image_size[1] for x in [j + 0.5]]
                dense_cy = [y * steps[k] / image_size[0] for y in [i + 0.5]]
                for cy, cx in product(dense_cy, dense_cx):
                    anchors += [cx, cy, s_kx, s_ky]
    output = np.array(anchors).reshape(-1, 4)
    print("image_size:",image_size," num_priors=",output.shape[0])
    return output
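# Sanity check: for image_size = (640, 640) the feature maps are 80x80, 40x40 and
# 20x20; with two min_sizes per level that gives (6400 + 1600 + 400) * 2 = 16800
# priors, matching the num_priors printed above.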


def box_decode(loc, priors):
    """Decode locations from predictions using priors to undo
    the encoding we did for offset regression at train time.
    Args:
        loc (tensor): location predictions for loc layers,
            Shape: [num_priors,4]
        priors (tensor): Prior boxes in center-offset form.
            Shape: [num_priors,4].
        variances: (list[float]) Variances of priorboxes
    Return:
        decoded bounding box predictions
    """
    variances = [0.1, 0.2]
    boxes = np.concatenate((
        priors[:, :2] + loc[:, :2] * variances[0] * priors[:, 2:],
        priors[:, 2:] * np.exp(loc[:, 2:] * variances[1])), axis=1)
    boxes[:, :2] -= boxes[:, 2:] / 2
    boxes[:, 2:] += boxes[:, :2]
    return boxes


def decode_landm(pre, priors):
    """Decode landm from predictions using priors to undo
    the encoding we did for offset regression at train time.
    Args:
        pre (tensor): landm predictions for loc layers,
            Shape: [num_priors,10]
        priors (tensor): Prior boxes in center-offset form.
            Shape: [num_priors,4].
        variances: (list[float]) Variances of priorboxes
    Return:
        decoded landm predictions
    """
    variances = [0.1, 0.2]
    landmarks = np.concatenate((
        priors[:, :2] + pre[:, :2] * variances[0] * priors[:, 2:],
        priors[:, :2] + pre[:, 2:4] * variances[0] * priors[:, 2:],
        priors[:, :2] + pre[:, 4:6] * variances[0] * priors[:, 2:],
        priors[:, :2] + pre[:, 6:8] * variances[0] * priors[:, 2:],
        priors[:, :2] + pre[:, 8:10] * variances[0] * priors[:, 2:]
    ), axis=1)
    return landmarks


def nms(dets, thresh):
    """Pure Python NMS baseline."""
    x1 = dets[:, 0]
    y1 = dets[:, 1]
    x2 = dets[:, 2]
    y2 = dets[:, 3]
    scores = dets[:, 4]

    areas = (x2 - x1 + 1) * (y2 - y1 + 1)
    order = scores.argsort()[::-1]

    keep = []
    while order.size > 0:
        i = order[0]
        keep.append(i)
        xx1 = np.maximum(x1[i], x1[order[1:]])
        yy1 = np.maximum(y1[i], y1[order[1:]])
        xx2 = np.minimum(x2[i], x2[order[1:]])
        yy2 = np.minimum(y2[i], y2[order[1:]])

        w = np.maximum(0.0, xx2 - xx1 + 1)
        h = np.maximum(0.0, yy2 - yy1 + 1)
        inter = w * h
        ovr = inter / (areas[i] + areas[order[1:]] - inter)

        inds = np.where(ovr <= thresh)[0]
        order = order[inds + 1]

    return keep

def distance2bbox(points, distance, max_shape=None):
    """Decode distance prediction to bounding box.

    Args:
        points (Tensor): Shape (n, 2), [x, y].
        distance (Tensor): Distance from the given point to 4
            boundaries (left, top, right, bottom).
        max_shape (tuple): Shape of the image.

    Returns:
        Tensor: Decoded bboxes.
    """
    x1 = points[:, 0] - distance[:, 0]
    y1 = points[:, 1] - distance[:, 1]
    x2 = points[:, 0] + distance[:, 2]
    y2 = points[:, 1] + distance[:, 3]
    if max_shape is not None:
        x1 = x1.clamp(min=0, max=max_shape[1])
        y1 = y1.clamp(min=0, max=max_shape[0])
        x2 = x2.clamp(min=0, max=max_shape[1])
        y2 = y2.clamp(min=0, max=max_shape[0])
    return np.stack([x1, y1, x2, y2], axis=-1)
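# Worked example: an anchor center at (8, 8) with predicted distances (4, 4, 4, 4)
# decodes to the corner box (4, 4, 12, 12) in model-input coordinates.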

def distance2kps(points, distance, max_shape=None):
    """Decode distance prediction to bounding box.

    Args:
        points (Tensor): Shape (n, 2), [x, y].
        distance (Tensor): Distance from the given point to 4
            boundaries (left, top, right, bottom).
        max_shape (tuple): Shape of the image.

    Returns:
        Tensor: Decoded bboxes.
    """
    preds = []
    for i in range(0, distance.shape[1], 2):
        px = points[:, i%2] + distance[:, i]
        py = points[:, i%2+1] + distance[:, i+1]
        if max_shape is not None:
            px = px.clamp(min=0, max=max_shape[1])
            py = py.clamp(min=0, max=max_shape[0])
        preds.append(px)
        preds.append(py)
    return np.stack(preds, axis=-1)

def draw_on(img, faces):
    import cv2
    dimg = img.copy()
    for i in range(len(faces)):
        face = faces[i]
        box = face.bbox.astype(int)
        color = (0, 0, 255)
        cv2.rectangle(dimg, (box[0], box[1]), (box[2], box[3]), color, 2)
        if face.kps is not None:
            kps = face.kps.astype(int)
            #print(landmark.shape)
            for l in range(kps.shape[0]):
                color = (0, 0, 255)
                if l == 0 or l == 3:
                    color = (0, 255, 0)
                cv2.circle(dimg, (kps[l][0], kps[l][1]), 1, color,
                               2)
        if face.gender is not None and face.age is not None:
            cv2.putText(dimg,'%s,%d'%(face.sex,face.age), (box[0]-1, box[1]-4),cv2.FONT_HERSHEY_COMPLEX,0.7,(0,255,0),1)

        #for key, value in face.items():
        #    if key.startswith('landmark_3d'):
        #        print(key, value.shape)
        #        print(value[0:10,:])
        #        lmk = np.round(value).astype(int)
        #        for l in range(lmk.shape[0]):
        #            color = (255, 0, 0)
        #            cv2.circle(dimg, (lmk[l][0], lmk[l][1]), 1, color,
        #                       2)
    return dimg 
     
     
       
if __name__ == '__main__':
    # Create the RKNN object
    rknn = RKNN()

    # Preprocessing configuration
    print('--> Config model')
    '''
    rknn.config(mean_values=[[104, 117, 123]], std_values=[[1, 1, 1]], target_platform="rv1106b",
                quantized_algorithm="normal",dynamic_input=[[[1,3,640,640]]], quant_img_RGB2BGR=True,remove_reshape=True,remove_weight=True,model_pruning=True)  # mmse
    '''
    rknn.config(mean_values=[[127.5, 127.5, 127.5]], std_values=[[128.0, 128.0, 128.0]], target_platform="rv1106b",
                quantized_algorithm="normal",dynamic_input=[[[1,3,640,640]]],
                quant_img_RGB2BGR=False,remove_reshape=False,remove_weight=False,model_pruning=True)  # mmse
    print('done')

    # Load the model
    print('--> Loading model')
    ret = rknn.load_onnx(model="./det_10g.onnx")#,input_size_list=[[1,3,640,640]])
    if ret != 0:
        print('Load model failed!')
        exit(ret)
    print('done')

    # Build the model
    print('--> Building model')
    ret = rknn.build(do_quantization=True, dataset=DATASET_PATH)#,rknn_batch_size=1)
    if ret != 0:
        print('Build model failed!')
        exit(ret)
    print('done')


    # Input image
    '''
    img = cv2.imread('./test.jpg')
    img_height, img_width, _ = img.shape
    model_height, model_width = (640, 640)
    letterbox_img, aspect_ratio, offset_x, offset_y = letterbox_resize(img, (model_height, model_width), 114)  # letterbox scaling
    infer_img = letterbox_img[..., ::-1]  # BGR2RGB
    infer_img = np.expand_dims(infer_img, 0)
    print(infer_img.shape)
    
    '''
    '''
    img = cv2.imread('./test.jpg')
    #img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
    img = cv2.resize(img,(640,640))
    infer_img = np.expand_dims(img, 0)
    
    '''
    img = cv2.imread('./test.jpg')
    input_size=(640,640)
    im_ratio = float(img.shape[0]) / img.shape[1]
    model_ratio = float(input_size[1]) / input_size[0]
    if im_ratio>model_ratio:
        new_height = input_size[1]
        new_width = int(new_height / im_ratio)
    else:
        new_width = input_size[0]
        new_height = int(new_width * im_ratio)
    det_scale = float(new_height) / img.shape[0]
    print(new_width,new_height)
    resized_img = cv2.resize(img, (new_width, new_height))
    det_img = np.zeros( (input_size[1], input_size[0], 3), dtype=np.uint8 )
    det_img[:new_height, :new_width, :] = resized_img
    infer_img=np.expand_dims(det_img, 0)
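    # Worked example (hypothetical 1280x960 image): im_ratio = 960/1280 = 0.75 < model_ratio = 1.0,
    # so new_width = 640, new_height = 480, and det_scale = 480/960 = 0.5;
    # det_scale is reused below to map detections back to the original image.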
    
    # Initialize the runtime environment
    print('--> Init runtime environment')
    ret = rknn.init_runtime()
    if ret != 0:
        print('Init runtime environment failed!')
        exit(ret)
    print('done')

    # Run inference
    print('--> Running model')
    '''
    outputs = rknn.inference(inputs=[img], data_format=['nhwc'])
    np.save('./onnx_resnet50v2_0.npy', outputs[0])
    x = outputs[0]
    output = np.exp(x)/np.sum(np.exp(x))
    outputs = [output]
    show_outputs(outputs)
    print('done')
    '''
    
    outputs = rknn.inference(inputs=[infer_img])#, data_format=['nhwc'])
    #print (outputs)
    scores_list = []
    bboxes_list = []
    kpss_list = [] 
    print("forward-------------------") 
    input_height = 640
    input_width = 640
    fmc = 3
    threshold=0.5
    feat_stride_fpn=[8, 16, 32]
    num_anchors = 2
    center_cache = {}  # cache anchor centers per (height, width, stride); must live outside the loop
    for idx, stride in enumerate(feat_stride_fpn):
        scores = outputs[idx]
        bbox_preds = outputs[idx+fmc]
        bbox_preds = bbox_preds * stride
        kps_preds = outputs[idx+fmc*2] * stride
        height = input_height // stride
        width = input_width // stride
        K = height * width
        key = (height, width, stride)
        if key in center_cache:
            anchor_centers = center_cache[key]
        else:
                #solution-1, c style:
                #anchor_centers = np.zeros( (height, width, 2), dtype=np.float32 )
                #for i in range(height):
                #    anchor_centers[i, :, 1] = i
                #for i in range(width):
                #    anchor_centers[:, i, 0] = i

                #solution-2:
                #ax = np.arange(width, dtype=np.float32)
                #ay = np.arange(height, dtype=np.float32)
                #xv, yv = np.meshgrid(np.arange(width), np.arange(height))
                #anchor_centers = np.stack([xv, yv], axis=-1).astype(np.float32)

                #solution-3:
            anchor_centers = np.stack(np.mgrid[:height, :width][::-1], axis=-1).astype(np.float32)
                #print(anchor_centers.shape)

            anchor_centers = (anchor_centers * stride).reshape( (-1, 2) )
            if num_anchors>1:
                anchor_centers = np.stack([anchor_centers]*num_anchors, axis=1).reshape( (-1,2) )
            if len(center_cache)<100:
                center_cache[key] = anchor_centers
                      
        pos_inds = np.where(scores>=threshold)[0]
        print(stride,pos_inds,scores,threshold)
        bboxes = distance2bbox(anchor_centers, bbox_preds)
        
        pos_scores = scores[pos_inds]
        pos_bboxes = bboxes[pos_inds]
        #print(stride,pos_inds,pos_bboxes)
        scores_list.append(pos_scores)
        bboxes_list.append(pos_bboxes)
       
        kpss = distance2kps(anchor_centers, kps_preds)
        #kpss = kps_preds
        kpss = kpss.reshape( (kpss.shape[0], -1, 2) )
        pos_kpss = kpss[pos_inds]
        kpss_list.append(pos_kpss)
    #self.forward
    print("bboxes_list---------------")
    print (bboxes_list)
    '''
    print("scores_list---------------")
    print (scores_list)
    print("bboxes_list---------------")
    print (bboxes_list)
    print("kpss_list-----------------") 
    print (kpss_list)
    print("-----------------------")
    '''  
    # det_scale was already computed during preprocessing above; do not overwrite it with a constant
    scores = np.vstack(scores_list)
    scores_ravel = scores.ravel()
    order = scores_ravel.argsort()[::-1]
    bboxes = np.vstack(bboxes_list) / det_scale
   
    kpss = np.vstack(kpss_list) / det_scale
    pre_det = np.hstack((bboxes, scores)).astype(np.float32, copy=False)
    pre_det = pre_det[order, :]
    keep = nms(pre_det,0.4)
    det = pre_det[keep, :]
    
    kpss = kpss[order,:,:]
    kpss = kpss[keep,:,:]
    max_num = 0
    metric = 'default'  # needed by the area/centering weighting below when max_num > 0
    print (max_num,det.shape[0])
    if max_num > 0 and det.shape[0] > max_num:
        area = (det[:, 2] - det[:, 0]) * (det[:, 3] -
                                                    det[:, 1])
        img_center = img.shape[0] // 2, img.shape[1] // 2
        offsets = np.vstack([
                (det[:, 0] + det[:, 2]) / 2 - img_center[1],
                (det[:, 1] + det[:, 3]) / 2 - img_center[0]
            ])
        offset_dist_squared = np.sum(np.power(offsets, 2.0), 0)
        if metric=='max':
           values = area
        else:
           values = area - offset_dist_squared * 2.0  # some extra weight on the centering
        bindex = np.argsort(
                values)[::-1]  # some extra weight on the centering
        bindex = bindex[0:max_num]
        det = det[bindex, :]
        if kpss is not None:
           kpss = kpss[bindex, :]
    #self.det_model.detect
    
    bboxes = det
    print("bboxes-------------------")   
    print(bboxes)
    print("kpss-------------------") 
    print(kpss)
    '''   
    if bboxes.shape[0] == 0:
        return []
    '''
    ret = []
    for i in range(bboxes.shape[0]):
        bbox = bboxes[i, 0:4]
        det_score = bboxes[i, 4]
        kps = None
        if kpss is not None:
            kps = kpss[i]
        face = Face(bbox=bbox, kps=kps, det_score=det_score)
        '''
        for taskname, model in self.models.items():
           if taskname=='detection':
               continue
           model.get(img, face)
        '''
        #model.get(img, face)
        ret.append(face)
    print("ret---------------")
    print (ret)  
    faces = ret
    img = cv2.imread('./test.jpg')
    rimg = draw_on(img, faces)
    cv2.imwrite("./ldh_output1.jpg", rimg)
    
   
    
    '''       
    # The prior-box decode path below is kept for reference only
    #loc, conf, landmarks = outputs  # unpack output data
    bboxes, kpss = outputs  # unpack output data

    priors = PriorBox(image_size=(model_height, model_width))  # generate prior boxes
    boxes = box_decode(loc.squeeze(0), priors)                 # decode box outputs
    
    # letterbox
    scale = np.array([model_width, model_height,
                      model_width, model_height])
    boxes = boxes * scale // 1  # face box
    boxes[...,0::2] =np.clip((boxes[...,0::2] - offset_x) / aspect_ratio, 0, img_width)  #letterbox
    boxes[...,1::2] =np.clip((boxes[...,1::2] - offset_y) / aspect_ratio, 0, img_height) #letterbox
    scores = conf.squeeze(0)[:, 1]  # face detection confidence
    landmarks = decode_landm(landmarks.squeeze(
        0), priors)  # face keypoint data
    scale_landmarks = np.array([model_width, model_height, model_width, model_height,
                                model_width, model_height, model_width, model_height,
                                model_width, model_height])
    landmarks = landmarks * scale_landmarks // 1
    landmarks[...,0::2] = np.clip((landmarks[...,0::2] - offset_x) / aspect_ratio, 0, img_width) #letterbox
    landmarks[...,1::2] = np.clip((landmarks[...,1::2] - offset_y) / aspect_ratio, 0, img_height) #letterbox
    
    # Discard low-confidence detections
    inds = np.where(scores > 0.5)[0]
    boxes = boxes[inds]
    landmarks = landmarks[inds]
    scores = scores[inds]

    order = scores.argsort()[::-1]
    boxes = boxes[order]
    landmarks = landmarks[order]
    scores = scores[order]

    # Non-maximum suppression
    dets = np.hstack((boxes, scores[:, np.newaxis])).astype(
        np.float32, copy=False)
    keep = nms(dets, 0.2)
    dets = dets[keep, :]
    landmarks = landmarks[keep]
    dets = np.concatenate((dets, landmarks), axis=1)

    # Draw boxes and labels
    for data in dets:
        if data[4] < 0.5:
            continue
        #print("face @ (%d %d %d %d) %f"%(data[0], data[1], data[2], data[3], data[4]))
        text = "{:.4f}".format(data[4])
        data = list(map(int, data))
        cv2.rectangle(img, (data[0], data[1]),
                      (data[2], data[3]), (0, 0, 255), 2)
        cx = data[0]
        cy = data[1] + 12
        cv2.putText(img, text, (cx, cy),
                    cv2.FONT_HERSHEY_DUPLEX, 0.5, (255, 255, 255))
        # landmarks
        cv2.circle(img, (data[5], data[6]), 1, (0, 0, 255), 5)
        cv2.circle(img, (data[7], data[8]), 1, (0, 255, 255), 5)
        cv2.circle(img, (data[9], data[10]), 1, (255, 0, 255), 5)
        cv2.circle(img, (data[11], data[12]), 1, (0, 255, 0), 5)
        cv2.circle(img, (data[13], data[14]), 1, (255, 0, 0), 5)
    img_path = './result.jpg'
    cv2.imwrite(img_path, img)
    print("save image in", img_path)
    '''
    # Release the RKNN context
    rknn.release()

At this point, face detection with landmark annotation works.

With the same approach, put the files from insightface\python-package into the rknn environment and modify the model initialization and inference in the model_zoo files to use the RKNN model; this produces the same results.

I tested the retinaface and arcface models: after modifying the Python files for these two models, FaceAnalysis works as well.
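In each file the change follows the same pattern: the mean/std normalization moves into rknn.config(), and the onnxruntime session call is replaced by RKNN inference on the raw NHWC image. A minimal sketch of the swap, assuming the RKNN model has already been built and the runtime initialized as in the script above:

# before (onnxruntime):
#   blob = cv2.dnn.blobFromImage(img, 1.0 / input_std, input_size,
#                                (input_mean, input_mean, input_mean), swapRB=True)
#   net_outs = session.run(output_names, {input_name: blob})
# after (RKNN): normalization is applied inside the runtime via rknn.config()
infer_img = np.expand_dims(img, 0)            # (1, H, W, 3) uint8
net_outs = rknn.inference(inputs=[infer_img])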

Python file for the retinaface model:

# -*- coding: utf-8 -*-
# @Organization  : insightface.ai
# @Author        : Jia Guo
# @Time          : 2021-09-18
# @Function      :

from __future__ import division
import datetime
import numpy as np
import onnx
import onnxruntime
import os
import os.path as osp
import cv2
import sys

DATASET_PATH = './dataset.txt'
DEFAULT_QUANT = True

def softmax(z):
    assert len(z.shape) == 2
    s = np.max(z, axis=1)
    s = s[:, np.newaxis] # necessary step to do broadcasting
    e_x = np.exp(z - s)
    div = np.sum(e_x, axis=1)
    div = div[:, np.newaxis] # dito
    return e_x / div

def distance2bbox(points, distance, max_shape=None):
    """Decode distance prediction to bounding box.

    Args:
        points (Tensor): Shape (n, 2), [x, y].
        distance (Tensor): Distance from the given point to 4
            boundaries (left, top, right, bottom).
        max_shape (tuple): Shape of the image.

    Returns:
        Tensor: Decoded bboxes.
    """
    x1 = points[:, 0] - distance[:, 0]
    y1 = points[:, 1] - distance[:, 1]
    x2 = points[:, 0] + distance[:, 2]
    y2 = points[:, 1] + distance[:, 3]
    if max_shape is not None:
        x1 = x1.clamp(min=0, max=max_shape[1])
        y1 = y1.clamp(min=0, max=max_shape[0])
        x2 = x2.clamp(min=0, max=max_shape[1])
        y2 = y2.clamp(min=0, max=max_shape[0])
    return np.stack([x1, y1, x2, y2], axis=-1)

def distance2kps(points, distance, max_shape=None):
    """Decode distance prediction to bounding box.

    Args:
        points (Tensor): Shape (n, 2), [x, y].
        distance (Tensor): Distance from the given point to 4
            boundaries (left, top, right, bottom).
        max_shape (tuple): Shape of the image.

    Returns:
        Tensor: Decoded bboxes.
    """
    preds = []
    for i in range(0, distance.shape[1], 2):
        px = points[:, i%2] + distance[:, i]
        py = points[:, i%2+1] + distance[:, i+1]
        if max_shape is not None:
            px = px.clamp(min=0, max=max_shape[1])
            py = py.clamp(min=0, max=max_shape[0])
        preds.append(px)
        preds.append(py)
    return np.stack(preds, axis=-1)

class RetinaFace:
    def __init__(self, model_file=None, session=None):
        import onnxruntime

        self.model_file = model_file
        self.session = session
        self.taskname = 'detection'
        if self.session is None:
            assert self.model_file is not None
            assert osp.exists(self.model_file)
            self.session = onnxruntime.InferenceSession(self.model_file, None)

        self.center_cache = {}
        self.nms_thresh = 0.4
        self.det_thresh = 0.5
        self._init_vars()

    def _init_vars(self):
        input_cfg = self.session.get_inputs()[0]
        input_shape = input_cfg.shape
        print(input_shape)
        if isinstance(input_shape[2], str):
            self.input_size = None
        else:
            self.input_size = tuple(input_shape[2:4][::-1])
        #print('image_size:', self.image_size)
        input_name = input_cfg.name
        self.input_shape = input_shape
        outputs = self.session.get_outputs()
        output_names = []
        for o in outputs:
            output_names.append(o.name)
        self.input_name = input_name
        self.output_names = output_names
        self.input_mean = 127.5
        self.input_std = 128.0
        print(self.output_names)
        #assert len(outputs)==10 or len(outputs)==15
        self.use_kps = False
        self._anchor_ratio = 1.0
        self._num_anchors = 1
        if len(outputs)==6:
            self.fmc = 3
            self._feat_stride_fpn = [8, 16, 32]
            self._num_anchors = 2
        elif len(outputs)==9:
            self.fmc = 3
            self._feat_stride_fpn = [8, 16, 32]
            self._num_anchors = 2
            self.use_kps = True
        elif len(outputs)==10:
            self.fmc = 5
            self._feat_stride_fpn = [8, 16, 32, 64, 128]
            self._num_anchors = 1
        elif len(outputs)==15:
            self.fmc = 5
            self._feat_stride_fpn = [8, 16, 32, 64, 128]
            self._num_anchors = 1
            self.use_kps = True

    def prepare(self, ctx_id, **kwargs):
        if ctx_id<0:
            self.session.set_providers(['CPUExecutionProvider'])
        nms_thresh = kwargs.get('nms_thresh', None)
        if nms_thresh is not None:
            self.nms_thresh = nms_thresh
        det_thresh = kwargs.get('det_thresh', None)
        if det_thresh is not None:
            self.det_thresh = det_thresh
        input_size = kwargs.get('input_size', None)
        if input_size is not None:
            if self.input_size is not None:
                print('warning: det_size is already set in detection model, ignore')
            else:
                self.input_size = input_size
        from rknn.api import RKNN
        # Create the RKNN object
        self.rknn = RKNN()
        # Preprocessing configuration
        print('--> Config model')
        '''
rknn.config(mean_values=[[104, 117, 123]], std_values=[[1, 1, 1]], target_platform="rv1106b",
                quantized_algorithm="normal",dynamic_input=[[[1,3,640,640]]], quant_img_RGB2BGR=True,remove_reshape=True,remove_weight=True,model_pruning=True)  # mmse
        '''
        self.rknn.config(mean_values=[[127.5, 127.5, 127.5]], std_values=[[128.0, 128.0, 128.0]], target_platform="rv1106b",quantized_algorithm="normal",dynamic_input=[[[1,3,640,640]]],quant_img_RGB2BGR=False,remove_reshape=False,remove_weight=False,model_pruning=True)  # mmse
        print('done')
        # Load the model
        print('--> Loading model')
        ret = self.rknn.load_onnx(model="./det_10g.onnx")#,input_size_list=[[1,3,640,640]])
        if ret != 0:
            print('Load model failed!')
            exit(ret)
        print('done')
        # Build the model
        print('--> Building model')
        ret = self.rknn.build(do_quantization=True, dataset=DATASET_PATH)#,rknn_batch_size=1)
        if ret != 0:
            print('Build model failed!')
            exit(ret)
        print('done')
        # Initialize the runtime environment
        print('--> Init runtime environment')
        ret = self.rknn.init_runtime()
        if ret != 0:
            print('Init runtime environment failed!')
            exit(ret)
        print('done')
        
    def forward(self, img, threshold):
        print("forward")
        scores_list = []
        bboxes_list = []
        kpss_list = []
        input_size = tuple(img.shape[0:2][::-1])
        #net_outs = self.rknn.inference(inputs=[img])
        blob = cv2.dnn.blobFromImage(img, 1.0/self.input_std, input_size, (self.input_mean, self.input_mean, self.input_mean), swapRB=True)
        #net_outs = self.session.run(self.output_names, {self.input_name : blob})
        infer_img=np.expand_dims(img, 0)
        net_outs = self.rknn.inference(inputs=infer_img)
        # Release after each inference; prepare() must be called again before the next detect()
        self.rknn.release()
        input_height = blob.shape[2]
        input_width = blob.shape[3]
        fmc = self.fmc
        for idx, stride in enumerate(self._feat_stride_fpn):
            scores = net_outs[idx]
            bbox_preds = net_outs[idx+fmc]
            bbox_preds = bbox_preds * stride
            if self.use_kps:
                kps_preds = net_outs[idx+fmc*2] * stride
            height = input_height // stride
            width = input_width // stride
            K = height * width
            key = (height, width, stride)
            if key in self.center_cache:
                anchor_centers = self.center_cache[key]
            else:
                #solution-1, c style:
                #anchor_centers = np.zeros( (height, width, 2), dtype=np.float32 )
                #for i in range(height):
                #    anchor_centers[i, :, 1] = i
                #for i in range(width):
                #    anchor_centers[:, i, 0] = i

                #solution-2:
                #ax = np.arange(width, dtype=np.float32)
                #ay = np.arange(height, dtype=np.float32)
                #xv, yv = np.meshgrid(np.arange(width), np.arange(height))
                #anchor_centers = np.stack([xv, yv], axis=-1).astype(np.float32)

                #solution-3:
                anchor_centers = np.stack(np.mgrid[:height, :width][::-1], axis=-1).astype(np.float32)
                #print(anchor_centers.shape)

                anchor_centers = (anchor_centers * stride).reshape( (-1, 2) )
                if self._num_anchors>1:
                    anchor_centers = np.stack([anchor_centers]*self._num_anchors, axis=1).reshape( (-1,2) )
                if len(self.center_cache)<100:
                    self.center_cache[key] = anchor_centers

            pos_inds = np.where(scores>=threshold)[0]
            bboxes = distance2bbox(anchor_centers, bbox_preds)
            pos_scores = scores[pos_inds]
            pos_bboxes = bboxes[pos_inds]
            scores_list.append(pos_scores)
            bboxes_list.append(pos_bboxes)
            if self.use_kps:
                kpss = distance2kps(anchor_centers, kps_preds)
                #kpss = kps_preds
                kpss = kpss.reshape( (kpss.shape[0], -1, 2) )
                pos_kpss = kpss[pos_inds]
                kpss_list.append(pos_kpss)
        return scores_list, bboxes_list, kpss_list

    def detect(self, img, input_size = None, max_num=0, metric='default'):
        assert input_size is not None or self.input_size is not None
        input_size = self.input_size if input_size is None else input_size
            
        im_ratio = float(img.shape[0]) / img.shape[1]
        model_ratio = float(input_size[1]) / input_size[0]
        if im_ratio>model_ratio:
            new_height = input_size[1]
            new_width = int(new_height / im_ratio)
        else:
            new_width = input_size[0]
            new_height = int(new_width * im_ratio)
        det_scale = float(new_height) / img.shape[0]
        resized_img = cv2.resize(img, (new_width, new_height))
        det_img = np.zeros( (input_size[1], input_size[0], 3), dtype=np.uint8 )
        det_img[:new_height, :new_width, :] = resized_img

        scores_list, bboxes_list, kpss_list = self.forward(det_img, self.det_thresh)

        scores = np.vstack(scores_list)
        scores_ravel = scores.ravel()
        order = scores_ravel.argsort()[::-1]
        bboxes = np.vstack(bboxes_list) / det_scale
        if self.use_kps:
            kpss = np.vstack(kpss_list) / det_scale
        pre_det = np.hstack((bboxes, scores)).astype(np.float32, copy=False)
        pre_det = pre_det[order, :]
        keep = self.nms(pre_det)
        det = pre_det[keep, :]
        if self.use_kps:
            kpss = kpss[order,:,:]
            kpss = kpss[keep,:,:]
        else:
            kpss = None
        if max_num > 0 and det.shape[0] > max_num:
            area = (det[:, 2] - det[:, 0]) * (det[:, 3] -
                                                    det[:, 1])
            img_center = img.shape[0] // 2, img.shape[1] // 2
            offsets = np.vstack([
                (det[:, 0] + det[:, 2]) / 2 - img_center[1],
                (det[:, 1] + det[:, 3]) / 2 - img_center[0]
            ])
            offset_dist_squared = np.sum(np.power(offsets, 2.0), 0)
            if metric=='max':
                values = area
            else:
                values = area - offset_dist_squared * 2.0  # some extra weight on the centering
            bindex = np.argsort(
                values)[::-1]  # some extra weight on the centering
            bindex = bindex[0:max_num]
            det = det[bindex, :]
            if kpss is not None:
                kpss = kpss[bindex, :]
        return det, kpss

    def nms(self, dets):
        thresh = self.nms_thresh
        x1 = dets[:, 0]
        y1 = dets[:, 1]
        x2 = dets[:, 2]
        y2 = dets[:, 3]
        scores = dets[:, 4]

        areas = (x2 - x1 + 1) * (y2 - y1 + 1)
        order = scores.argsort()[::-1]

        keep = []
        while order.size > 0:
            i = order[0]
            keep.append(i)
            xx1 = np.maximum(x1[i], x1[order[1:]])
            yy1 = np.maximum(y1[i], y1[order[1:]])
            xx2 = np.minimum(x2[i], x2[order[1:]])
            yy2 = np.minimum(y2[i], y2[order[1:]])

            w = np.maximum(0.0, xx2 - xx1 + 1)
            h = np.maximum(0.0, yy2 - yy1 + 1)
            inter = w * h
            ovr = inter / (areas[i] + areas[order[1:]] - inter)

            inds = np.where(ovr <= thresh)[0]
            order = order[inds + 1]

        return keep

def get_retinaface(name, download=False, root='~/.insightface/models', **kwargs):
    print("get---------------------")
    if not download:
        assert os.path.exists(name)
        return RetinaFace(name)
    else:
        from .model_store import get_model_file
        _file = get_model_file("retinaface_%s" % name, root=root)
        return RetinaFace(_file)

Python file for the arcface model:

# -*- coding: utf-8 -*-
# @Organization  : insightface.ai
# @Author        : Jia Guo
# @Time          : 2021-05-04
# @Function      : 

from __future__ import division
import numpy as np
import cv2
import onnx
import onnxruntime
from ..utils import face_align

DATASET_PATH = './dataset.txt'
DEFAULT_QUANT = True

__all__ = [
    'ArcFaceONNX',
]


class ArcFaceONNX:
    def __init__(self, model_file=None, session=None):
        assert model_file is not None
        
        self.model_file = model_file
        self.session = session
        self.taskname = 'recognition'
        find_sub = False
        find_mul = False
        model = onnx.load(self.model_file)
        #print(self.model_file)
        graph = model.graph
        for nid, node in enumerate(graph.node[:8]):
            #print(nid, node.name)
            if node.name.startswith('Sub') or node.name.startswith('_minus'):
                find_sub = True
            if node.name.startswith('Mul') or node.name.startswith('_mul'):
                find_mul = True
        if find_sub and find_mul:
            #mxnet arcface model
            input_mean = 0.0
            input_std = 1.0
        else:
            input_mean = 127.5
            input_std = 127.5
        self.input_mean = input_mean
        self.input_std = input_std
        #print('input mean and std:', self.input_mean, self.input_std)
        if self.session is None:
            self.session = onnxruntime.InferenceSession(self.model_file, None)

        input_cfg = self.session.get_inputs()[0]
        input_shape = input_cfg.shape
        input_name = input_cfg.name
        self.input_size = tuple(input_shape[2:4][::-1])
        self.input_shape = input_shape
        outputs = self.session.get_outputs()
        output_names = []
        for out in outputs:
            output_names.append(out.name)
        self.input_name = input_name
        self.output_names = output_names
        assert len(self.output_names)==1
        self.output_shape = outputs[0].shape

    def prepare(self, ctx_id, **kwargs):
        if ctx_id<0:
            self.session.set_providers(['CPUExecutionProvider'])
        from rknn.api import RKNN
        # Create the RKNN object
        self.rknn = RKNN()
        # Preprocessing configuration
        print('--> Config model')
        '''
rknn.config(mean_values=[[104, 117, 123]], std_values=[[1, 1, 1]], target_platform="rv1106b",
                quantized_algorithm="normal",dynamic_input=[[[1,3,640,640]]], quant_img_RGB2BGR=True,remove_reshape=True,remove_weight=True,model_pruning=True)  # mmse
        '''
        self.rknn.config(mean_values=[[self.input_mean, self.input_mean, self.input_mean]], std_values=[[self.input_std, self.input_std, self.input_std]], target_platform="rv1106b",quantized_algorithm="normal",dynamic_input=[[[1,3,112,112]]],quant_img_RGB2BGR=False,remove_reshape=False,remove_weight=False,model_pruning=True)  # mmse
        print('done')
        # Load the model
        print('--> Loading model')
        ret = self.rknn.load_onnx(model=self.model_file)#,input_size_list=[[1,3,640,640]])
        if ret != 0:
            print('Load model failed!')
            exit(ret)
        print('done')
        # Build the model
        print('--> Building model')
        ret = self.rknn.build(do_quantization=True, dataset=DATASET_PATH)#,rknn_batch_size=1)
        if ret != 0:
            print('Build model failed!')
            exit(ret)
        print('done')
        # Initialize the runtime environment
        print('--> Init runtime environment')
        ret = self.rknn.init_runtime()
        if ret != 0:
            print('Init runtime environment failed!')
            exit(ret)
        print('done')

    def get(self, img, face):
        aimg = face_align.norm_crop(img, landmark=face.kps, image_size=self.input_size[0])
        #face.embedding = self.get_feat(aimg).flatten()
        face.embedding = self.get_feat(aimg)
        return face.embedding

    def compute_sim(self, feat1, feat2):
        from numpy.linalg import norm
        feat1 = feat1.ravel()
        feat2 = feat2.ravel()
        sim = np.dot(feat1, feat2) / (norm(feat1) * norm(feat2))
        return sim

    def get_feat(self, imgs):
        '''
        if not isinstance(imgs, list):
            imgs = [imgs]
        input_size = self.input_size
        
        blob = cv2.dnn.blobFromImages(imgs, 1.0 / self.input_std, input_size,
                                      (self.input_mean, self.input_mean, self.input_mean), swapRB=True)
        net_out = self.session.run(self.output_names, {self.input_name: blob})[0]
        '''
        infer_img=np.expand_dims(imgs, 0)
        net_out = self.rknn.inference(inputs=infer_img)
        net_outs=np.array(net_out).flatten()#.ravel()
        
        return net_outs

    def forward(self, batch_data):
        #blob = (batch_data - self.input_mean) / self.input_std
        #net_out = self.session.run(self.output_names, {self.input_name: blob})[0]
        infer_img = np.expand_dims(batch_data, 0)
        net_out = self.rknn.inference(inputs=infer_img)
        return net_out

Test file for insightface under rknn:

import os
import sys
import urllib
import urllib.request
import time
import numpy as np
import cv2

import insightface
from insightface.app import FaceAnalysis
from insightface.data import get_image as ins_get_image

from math import ceil
from itertools import product as product

#from rknn.api import RKNN



# Run
print('--> Running model')
#outputs = rknn.inference(inputs=[infer_img])#, data_format=['nhwc'])

app = FaceAnalysis(allowed_modules=['detection'],providers=['CUDAExecutionProvider', 'CPUExecutionProvider'])
print("prepare::::")
app.prepare(ctx_id=0, det_size=(640, 640))
img = ins_get_image('t1')  # no file extension needed; put images in ./insightface/python-package/insightface/data/images
faces = app.get(img)
print("size of faces:", len(faces))

app.prepare(ctx_id=0, det_size=(640, 640))
img1 = ins_get_image('face1')  # no file extension needed; same images directory as above
faces1 = app.get(img1)
print("size of faces1:", len(faces1))
#print("faces::::", faces)
#rimg = app.draw_on(img, faces)
#cv2.imwrite("./output1.jpg", rimg)

handler = insightface.model_zoo.get_model('/home/ubuntu/.insightface/models/buffalo_l/w600k_r50.onnx', providers=['CUDAExecutionProvider', 'CPUExecutionProvider'])
handler.prepare(ctx_id=0)
img = ins_get_image('t1')
feature = handler.get(img, faces[0])
#print("size of feature:", len(feature))
print("feature:", feature)
sim = handler.compute_sim(feature,feature)
print("sim:", sim)


img1 = ins_get_image('face1')
feature1 = handler.get(img1, faces1[0])
for i in range(len(faces)):
    feature = handler.get(img, faces[i])
    sim = handler.compute_sim(feature,feature1)
    print("sim:", i,sim)


Attachment: RK_insightface.7z (21.29 MB)

Extract it to /rknn-toolkit2/luckfox_rknn/scripts/luckfox_onnx_to_rknn/sim; once the environment is set up you can run inference on the PC.

Latest reply

Hello, may I ask where the `image` in `from image import get_image as ins_get_image` comes from?