Table of Contents

1. About JavaCV
~~2. Download the latest [OpenCV 4.8](https://opencv.org/releases/) from the official site and unzip it~~ *(installing OpenCV is not strictly required)*
~~3. Add the OpenCV jar and native dll to the project~~
4. Add the javacv dependency to pom.xml
5. Tests
  5.1 Image beautification
  5.2 Face detection in an image
  5.3 Extracting the audio track from a video
  5.4 Audio/video cutting
  5.5 Screen recording
  5.6 Streaming and media playback ([reference](https://xinchen.blog.csdn.net/article/details/121434969))
  5.7 Camera examples ([reference](https://xinchen.blog.csdn.net/article/details/121572093))
    5.7.1 Saving camera video as mp4
    5.7.2 Capturing snapshots from the camera
    5.7.3 Pushing the camera stream
  5.8 Face recognition training and prediction
    5.8.1 Training a face recognition model with JavaCV
    5.8.2 Predicting a face photo with the model
    5.8.3 Replacing image-file input with camera frames for live face detection and recognition

1. About JavaCV

Built on OpenCV, JavaCV is used for image and audio/video processing, video capture, RTMP/HLS streaming (both pulling and pushing), and machine-learning tasks such as image recognition and face recognition. Python may implement some of these features better or be a more natural fit, but JavaCV still works quite well.

2. Download the latest OpenCV 4.8 from the official site and unzip it (installing OpenCV separately is not strictly required)

3. Add the OpenCV jar and native dll to the project

① E:\opencv\build\java\opencv-480.jar can be installed into the local Maven repository with a Maven command, or added in IDEA via Settings -> Project Structure -> Libraries -> "+". ② E:\opencv\build\java\x64\opencv_java480.dll can be copied to a directory on the native library search path such as C:\Windows\System32, made visible through an environment variable, or loaded directly in code with System.load("E:\\opencv\\build\\java\\x64\\opencv_java480.dll");
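
If you go the manual route from option ② above, a minimal sketch of loading the dll in code looks like this (the class name and paths are just placeholders based on this section; none of this is needed if you use the javacv-platform dependency from section 4, which bundles its own native libraries):

```java
import org.opencv.core.Core;

public class OpenCvLoaderDemo {

    static {
        // Load the native library from the path used in this article; adjust to your own install location.
        System.load("E:\\opencv\\build\\java\\x64\\opencv_java480.dll");
    }

    public static void main(String[] args) {
        // If the dll loaded correctly, the OpenCV Java API from opencv-480.jar is usable from here on.
        System.out.println("OpenCV version: " + Core.VERSION);
    }
}
```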

4. Add the javacv dependency to pom.xml

<dependency>
    <groupId>org.bytedeco</groupId>
    <artifactId>javacv-platform</artifactId>
    <version>1.5.9</version>
</dependency>

5. Tests

5.1 Image beautification

package cv;

import org.bytedeco.opencv.opencv_core.Mat;

import java.io.File;

import static org.bytedeco.opencv.global.opencv_imgcodecs.imread;

import static org.bytedeco.opencv.global.opencv_imgcodecs.imwrite;

import static org.bytedeco.opencv.global.opencv_imgproc.bilateralFilter;

public class Meiyan {

public static void main(String[] args) {

Mat result = new Mat();

Mat image = imread("D:\\dayun.jpg");

int level = 18; // the larger the value, the stronger the smoothing

bilateralFilter(image, result, level, level * 2, level / 2);

File out = new File("out.png");

imwrite(out.getPath(), result);

}

}

5.2 Face detection in an image

Note that detected faces are outlined with rectangles, and some faces may not be detected. The CascadeClassifier file loaded here comes from the OpenCV installation package; a copy found elsewhere works just as well.

package cv;

import org.bytedeco.opencv.opencv_core.*;

import org.bytedeco.opencv.opencv_objdetect.CascadeClassifier;

import static org.bytedeco.opencv.global.opencv_imgcodecs.imread;

import static org.bytedeco.opencv.global.opencv_imgcodecs.imwrite;

import static org.bytedeco.opencv.global.opencv_imgproc.LINE_8;

import static org.bytedeco.opencv.global.opencv_imgproc.rectangle;

public class FaceDetector {

public static void main(String[] args) {

// Load the image

Mat image = imread("D://meinv.jpeg");

// Load the face cascade classifier

CascadeClassifier faceCascade = new CascadeClassifier("E:\\opencv\\sources\\data\\haarcascades\\haarcascade_frontalface_alt.xml");

// Detect faces in the image

RectVector faceDetections = new RectVector();

faceCascade.detectMultiScale(image, faceDetections);

// Draw a rectangle around each detected face

for (Rect rect : faceDetections.get()) {

rectangle(image, new Point(rect.x(), rect.y()), new Point(rect.x() + rect.width(), rect.y() + rect.height()),

new Scalar(0, 255, 0, 0), 2, LINE_8, 0);

}

// Save the image with the detected faces

imwrite("face.jpg", image);

}

}

5.3 Extracting the audio track from a video

package cv;

import org.bytedeco.javacv.FFmpegFrameGrabber;

import org.bytedeco.javacv.FFmpegFrameRecorder;

import org.bytedeco.javacv.Frame;

import java.io.File;

import java.util.UUID;

public class MP4ToAudio {

public static void mp4ToAudio(String sourceFilePath) {

System.out.println("提取音频文件");

File file = new File(sourceFilePath);

// grab the source

FFmpegFrameGrabber frameGrabber1 = new FFmpegFrameGrabber(sourceFilePath);

Frame frame = null;

FFmpegFrameRecorder recorder = null;

String fileName = null;

try {

frameGrabber1.start();

fileName = file.getAbsolutePath() + UUID.randomUUID() + ".mp3";

System.out.println("--文件名-->>" + fileName);

recorder = new FFmpegFrameRecorder(fileName, frameGrabber1.getAudioChannels());

recorder.setFormat("mp3");

recorder.setSampleRate(frameGrabber1.getSampleRate());

recorder.setTimestamp(frameGrabber1.getTimestamp());

recorder.setAudioQuality(0);

recorder.start();

int index = 0;

while (true) {

frame = frameGrabber1.grab();

if (frame == null) {

System.out.println("视频处理完成");

break;

}

if (frame.samples != null) {

recorder.recordSamples(frame.sampleRate, frame.audioChannels, frame.samples);

}

System.out.println("帧值=" + index);

index++;

}

recorder.stop();

recorder.release();

frameGrabber1.stop();

} catch (Exception e) {

e.printStackTrace();

}

}

public static void main(String[] args) {

String sourceFilePath = "D://test.mp4";

mp4ToAudio(sourceFilePath);

}

}

5.4 Audio/video cutting

The example below uses the third-party tool ffmpeg.exe (not JavaCV) to process audio and video. If a tool such as JianYing (CapCut) is installed, you can use the ffmpeg.exe that ships with it directly.

package cv;

import java.io.File;

import java.util.ArrayList;

import java.util.Arrays;

import java.util.List;

public class CvCutter {

private static String ffmpegEXE = "F:\\JianYing\\bin\\ffmpeg.exe"; // the cloud-drive link in the previous article (converting video to MP4) also has a copy you can download directly

private static List<String> VIDEO_LIST = Arrays.asList("mov", "mpg", "wmv", "3gp", "asf", "asx", "avi", "wmv9", "rm", "rmvb", "flv");

private static List<String> AUDIO_LIST = Arrays.asList("mp3", "acm", "wav", "wma", "mp1", "aif");

public static Boolean cutVideoOrAudio(String src, String start, String end, String dest) throws Exception {

File file = new File(dest);

if (file.exists()) {

return false;

}

if (!file.getParentFile().isDirectory()) {

file.getParentFile().mkdirs();

}

List<String> command = getCommonList(src, start, end, dest);

ProcessBuilder builder = new ProcessBuilder();

Process process = builder.command(command).redirectErrorStream(true).start();

process.waitFor();

process.destroy();

return true;

}

public static List<String> getCommonList(String src, String start, String end, String dest) {

String suffix = src.substring(src.lastIndexOf(".") + 1);

List<String> command = new ArrayList<>();

if (VIDEO_LIST.contains(suffix)) {

command.add(ffmpegEXE);

command.add("-ss");

command.add(start);

command.add("-to");

command.add(end);

command.add("-i");

command.add(src);

command.add("-c:v");

command.add("libx264");

command.add("-c:a");

command.add("aac");

command.add("-strict");

command.add("experimental");

command.add("-b:a");

command.add("98k");

command.add(dest);

command.add("-y");

} else if (AUDIO_LIST.contains(suffix)) {

command.add(ffmpegEXE);

command.add("-i");

command.add(src);

command.add("-ss");

command.add(start);

command.add("-to");

command.add(end);

command.add(dest);

command.add("-y");

} else {

throw new RuntimeException("unknown format");

}

return command;

}

public static void main(String[] args) throws Exception {

String input = "D:\\test.mp3";

String out = "D:\\part.mp3";

String suffix = input.substring(input.lastIndexOf(".") + 1);

System.out.println(suffix);

String start = "00:00:10";

String end = "00:00:20";

CvCutter.cutVideoOrAudio(input, start, end, out);

}

}

5.5 Screen recording

package cv;

import org.bytedeco.ffmpeg.global.avcodec;

import org.bytedeco.javacv.FFmpegFrameRecorder;

import org.bytedeco.javacv.Java2DFrameConverter;

import java.awt.*;

import java.awt.image.BufferedImage;

import java.time.LocalDateTime;

import java.time.temporal.ChronoUnit;

/**

* TODO

*

* @author majun

* @version 1.0

* @since 2023-10-11 20:40

*/

public class ScreenRecord {

/**

* Record the screen

* @param filename output file name

* @param seconds recording duration in seconds

*/

public static void recordScreen(String filename, int seconds) {

final int FRAME_RATE = 30;

final Dimension SCREEN_SIZE = Toolkit.getDefaultToolkit().getScreenSize();

// create the screen recorder and set its properties

FFmpegFrameRecorder recorder = new FFmpegFrameRecorder(filename, SCREEN_SIZE.width, SCREEN_SIZE.height);

recorder.setVideoCodec(avcodec.AV_CODEC_ID_H264);

recorder.setFormat("mp4");

recorder.setFrameRate(FRAME_RATE);

Java2DFrameConverter converter = new Java2DFrameConverter();

try {

// initialize the recorder

recorder.start();

Robot robot = new Robot();

BufferedImage screenShot;

// current system time

LocalDateTime now = LocalDateTime.now();

System.out.println(now);

// the point in time `seconds` seconds from now, i.e. when recording should stop

LocalDateTime plus = now.plus(seconds, ChronoUnit.SECONDS);

System.out.println(plus);

// start recording

while (true) {

// take a screenshot and write it to the recorder

screenShot = robot.createScreenCapture(new Rectangle(SCREEN_SIZE));

recorder.record(converter.getFrame(screenShot));

// current time, to check against the stop time

LocalDateTime time = LocalDateTime.now();

if(plus.isBefore(time)){

System.out.println(time);

break;

}

}

} catch (Exception e) {

e.printStackTrace();

} finally {

// close the recorder

try {

recorder.stop();

} catch (Exception e) {

e.printStackTrace();

}

}

}

public static void main(String[] args) {

recordScreen("screen.mp4",10);

}

}

5.6 Streaming and media playback ([reference](https://xinchen.blog.csdn.net/article/details/121434969))

First start an SRS streaming server: docker run -p 1935:1935 -p 1985:1985 -p 8080:8080 ossrs/srs. Then run the push code below. Finally, open the VLC media player, press Ctrl+N, and open rtmp://192.168.72.126:1935/live/livestream (the same address used for pushing) to pull and play the stream from SRS.

package cv;

import lombok.extern.slf4j.Slf4j;

import org.bytedeco.ffmpeg.avcodec.AVCodecParameters;

import org.bytedeco.ffmpeg.avformat.AVFormatContext;

import org.bytedeco.ffmpeg.avformat.AVStream;

import org.bytedeco.ffmpeg.global.avcodec;

import org.bytedeco.ffmpeg.global.avutil;

import org.bytedeco.javacv.FFmpegFrameGrabber;

import org.bytedeco.javacv.FFmpegFrameRecorder;

import org.bytedeco.javacv.FFmpegLogCallback;

import org.bytedeco.javacv.Frame;

/**

* @author willzhao

* @version 1.0

* @description read the specified mp4 file and push it to the SRS server

* @date 2021/11/19 8:49

*/

@Slf4j

public class PushMp4 {

private static final String MP4_FILE_PATH = "D://test.mp4";

/**

* SRS push (publish) address

*/

private static final String SRS_PUSH_ADDRESS = "rtmp://192.168.72.126:1935/live/livestream";

/**

* Read the specified mp4 file and push it to the SRS server

* @param sourceFilePath absolute path of the video file

* @param PUSH_ADDRESS push address

* @throws Exception

*/

private static void grabAndPush(String sourceFilePath, String PUSH_ADDRESS) throws Exception {

// ffmpeg log level

avutil.av_log_set_level(avutil.AV_LOG_INFO);

FFmpegLogCallback.set();

// create the frame grabber with the source file path

FFmpegFrameGrabber grabber = new FFmpegFrameGrabber(sourceFilePath);

long startTime = System.currentTimeMillis();

log.info("开始初始化帧抓取器");

// 初始化帧抓取器,例如数据结构(时间戳、编码器上下文、帧对象等),

// 如果入参等于true,还会调用avformat_find_stream_info方法获取流的信息,放入AVFormatContext类型的成员变量oc中

grabber.start(true);

log.info("帧抓取器初始化完成,耗时[{}]毫秒", System.currentTimeMillis()-startTime);

// grabber.start方法中,初始化的解码器信息存在放在grabber的成员变量oc中

AVFormatContext avFormatContext = grabber.getFormatContext();

// 文件内有几个媒体流(一般是视频流+音频流)

int streamNum = avFormatContext.nb_streams();

// 没有媒体流就不用继续了

if (streamNum<1) {

log.error("文件内不存在媒体流");

return;

}

// 取得视频的帧率

int frameRate = (int)grabber.getVideoFrameRate();

log.info("视频帧率[{}],视频时长[{}]秒,媒体流数量[{}]",

frameRate,

avFormatContext.duration()/1000000,

avFormatContext.nb_streams());

// 遍历每一个流,检查其类型

for (int i=0; i< streamNum; i++) {

AVStream avStream = avFormatContext.streams(i);

AVCodecParameters avCodecParameters = avStream.codecpar();

log.info("流的索引[{}],编码器类型[{}],编码器ID[{}]", i, avCodecParameters.codec_type(), avCodecParameters.codec_id());

}

// 视频宽度

int frameWidth = grabber.getImageWidth();

// 视频高度

int frameHeight = grabber.getImageHeight();

// 音频通道数量

int audioChannels = grabber.getAudioChannels();

log.info("视频宽度[{}],视频高度[{}],音频通道数[{}]",

frameWidth,

frameHeight,

audioChannels);

// create the FFmpegFrameRecorder with the SRS push address

FFmpegFrameRecorder recorder = new FFmpegFrameRecorder(PUSH_ADDRESS,

frameWidth,

frameHeight,

audioChannels);

// 设置编码格式

recorder.setVideoCodec(avcodec.AV_CODEC_ID_H264);

// 设置封装格式

recorder.setFormat("flv");

// 一秒内的帧数

recorder.setFrameRate(frameRate);

// 两个关键帧之间的帧数

recorder.setGopSize(frameRate);

// 设置音频通道数,与视频源的通道数相等

recorder.setAudioChannels(grabber.getAudioChannels());

startTime = System.currentTimeMillis();

log.info("开始初始化帧抓取器");

// 初始化帧录制器,例如数据结构(音频流、视频流指针,编码器),

// 调用av_guess_format方法,确定视频输出时的封装方式,

// 媒体上下文对象的内存分配,

// 编码器的各项参数设置

recorder.start();

log.info("帧录制初始化完成,耗时[{}]毫秒", System.currentTimeMillis()-startTime);

Frame frame;

startTime = System.currentTimeMillis();

log.info("开始推流");

long videoTS = 0;

int videoFrameNum = 0;

int audioFrameNum = 0;

int dataFrameNum = 0;

// 假设一秒钟15帧,那么两帧间隔就是(1000/15)毫秒

int interVal = 1000/frameRate;

// 发送完一帧后sleep的时间,不能完全等于(1000/frameRate),不然会卡顿,

// 要更小一些,这里取八分之一

interVal/=8;

// 持续从视频源取帧

while (null!=(frame=grabber.grab())) {

videoTS = 1000 * (System.currentTimeMillis() - startTime);

// 时间戳

recorder.setTimestamp(videoTS);

// 有图像,就把视频帧加一

if (null!=frame.image) {

videoFrameNum++;

}

// 有声音,就把音频帧加一

if (null!=frame.samples) {

audioFrameNum++;

}

// 有数据,就把数据帧加一

if (null!=frame.data) {

dataFrameNum++;

}

// 取出的每一帧,都推送到SRS

recorder.record(frame);

// 停顿一下再推送

Thread.sleep(interVal);

}

log.info("推送完成,视频帧[{}],音频帧[{}],数据帧[{}],耗时[{}]秒",

videoFrameNum,

audioFrameNum,

dataFrameNum,

(System.currentTimeMillis()-startTime)/1000);

// 关闭帧录制器

recorder.close();

// 关闭帧抓取器

grabber.close();

}

public static void main(String[] args) throws Exception {

grabAndPush(MP4_FILE_PATH, SRS_PUSH_ADDRESS);

}

}

5.7 Camera examples ([reference](https://xinchen.blog.csdn.net/article/details/121572093))

If you do not have a webcam, a phone can act as the camera. Roughly: install the WuTa Camera (无他相机) app on the phone and the WuTa companion (无他伴侣) on the PC; on the phone open "About phone" -> tap the system version area repeatedly to enable developer mode -> enable USB debugging -> connect the USB cable and choose "transfer files"; then open WuTa Camera on the phone and enter its live-streaming assistant, and in the PC companion select the detected phone and click sync. The following examples are implemented by extending the abstract base class below.

package com.bolingcavalry.grabpush.camera;

import lombok.Getter;

import lombok.extern.slf4j.Slf4j;

import org.bytedeco.ffmpeg.global.avutil;

import org.bytedeco.javacv.*;

import org.bytedeco.opencv.global.opencv_imgproc;

import org.bytedeco.opencv.opencv_core.Mat;

import org.bytedeco.opencv.opencv_core.Scalar;

import java.text.SimpleDateFormat;

import java.util.Date;

/**

* @author will

* @email zq2599@gmail.com

* @date 2021/11/19 8:07 上午

* @description base class for camera applications; it defines the basic grab-and-output flow, and subclasses only need to implement the concrete business methods

*/

@Slf4j

public abstract class AbstractCameraApplication {

/**

* 摄像头序号,如果只有一个摄像头,那就是0

*/

protected static final int CAMERA_INDEX = 0;

/**

* 帧抓取器

*/

protected FrameGrabber grabber;

/**

* 输出帧率

*/

@Getter

private final double frameRate = 30;

/**

* 摄像头视频的宽

*/

@Getter

private final int cameraImageWidth = 1280;

/**

* 摄像头视频的高

*/

@Getter

private final int cameraImageHeight = 720;

/**

* 转换器

*/

private final OpenCVFrameConverter.ToIplImage openCVConverter = new OpenCVFrameConverter.ToIplImage();

/**

* 实例化、初始化输出操作相关的资源

*/

protected abstract void initOutput() throws Exception;

/**

* 输出

*/

protected abstract void output(Frame frame) throws Exception;

/**

* 释放输出操作相关的资源

*/

protected abstract void releaseOutputResource() throws Exception;

/**

* 两帧之间的间隔时间

* @return

*/

protected int getInterval() {

// 假设一秒钟15帧,那么两帧间隔就是(1000/15)毫秒

return (int)(1000/ frameRate);

}

/**

* 实例化帧抓取器,默认OpenCVFrameGrabber对象,

* 子类可按需要自行覆盖

* @throws FFmpegFrameGrabber.Exception

*/

protected void instanceGrabber() throws FrameGrabber.Exception {

grabber = new OpenCVFrameGrabber(CAMERA_INDEX);

}

/**

* 用帧抓取器抓取一帧,默认调用grab()方法,

* 子类可以按需求自行覆盖

* @return

*/

protected Frame grabFrame() throws FrameGrabber.Exception {

return grabber.grab();

}

/**

* 初始化帧抓取器

* @throws Exception

*/

protected void initGrabber() throws Exception {

// 实例化帧抓取器

instanceGrabber();

// 摄像头有可能有多个分辨率,这里指定

// 可以指定宽高,也可以不指定反而调用grabber.getImageWidth去获取,

grabber.setImageWidth(cameraImageWidth);

grabber.setImageHeight(cameraImageHeight);

// 开启抓取器

grabber.start();

}

/**

* 预览和输出

* @param grabSeconds 持续时长

* @throws Exception

*/

private void grabAndOutput(int grabSeconds) throws Exception {

// 添加水印时用到的时间工具

SimpleDateFormat simpleDateFormat = new SimpleDateFormat("yyyy-MM-dd HH:mm:ss");

long endTime = System.currentTimeMillis() + 1000L *grabSeconds;

// 两帧输出之间的间隔时间,默认是1000除以帧率,子类可酌情修改

int interVal = getInterval();

// 水印在图片上的位置

org.bytedeco.opencv.opencv_core.Point point = new org.bytedeco.opencv.opencv_core.Point(15, 35);

Frame captureFrame;

Mat mat;

// exit the loop once the end time is reached

while (System.currentTimeMillis() < endTime) {

// 取一帧

captureFrame = grabFrame();

if (null==captureFrame) {

log.error("帧对象为空");

break;

}

// 将帧对象转为mat对象

mat = openCVConverter.convertToMat(captureFrame);

// 在图片上添加水印,水印内容是当前时间,位置是左上角

opencv_imgproc.putText(mat,

simpleDateFormat.format(new Date()),

point,

opencv_imgproc.CV_FONT_VECTOR0,

0.8,

new Scalar(0, 200, 255, 0),

1,

0,

false);

// 子类输出

output(openCVConverter.convert(mat));

// a short pause between frames, small enough that the eye does not notice any flicker

if(interVal>0) {

Thread.sleep(interVal);

}

}

log.info("输出结束");

}

/**

* 释放所有资源

*/

private void safeRelease() {

try {

// 子类需要释放的资源

releaseOutputResource();

} catch (Exception exception) {

log.error("do releaseOutputResource error", exception);

}

if (null!=grabber) {

try {

grabber.close();

} catch (Exception exception) {

log.error("close grabber error", exception);

}

}

}

/**

* 整合了所有初始化操作

* @throws Exception

*/

private void init() throws Exception {

long startTime = System.currentTimeMillis();

// set the ffmpeg log level

avutil.av_log_set_level(avutil.AV_LOG_INFO);

FFmpegLogCallback.set();

// 实例化、初始化帧抓取器

initGrabber();

// 实例化、初始化输出操作相关的资源,

// 具体怎么输出由子类决定,例如窗口预览、存视频文件等

initOutput();

log.info("初始化完成,耗时[{}]毫秒,帧率[{}],图像宽度[{}],图像高度[{}]",

System.currentTimeMillis()-startTime,

frameRate,

cameraImageWidth,

cameraImageHeight);

}

/**

* 执行抓取和输出的操作

*/

public void action(int grabSeconds) {

try {

// 初始化操作

init();

// 持续拉取和推送

grabAndOutput(grabSeconds);

} catch (Exception exception) {

log.error("execute action error", exception);

} finally {

// 无论如何都要释放资源

safeRelease();

}

}

}

5.7.1 Saving camera video as mp4

package cv;

import org.bytedeco.ffmpeg.global.avcodec;

import org.bytedeco.javacv.FFmpegFrameRecorder;

import org.bytedeco.javacv.Frame;

import org.bytedeco.javacv.FrameRecorder;

import static org.bytedeco.ffmpeg.global.avutil.AV_PIX_FMT_YUV420P;

/**

* TODO

*

* @author majun

* @version 1.0

* @since 2023-10-11 22:13

*/

public class CameraMp4Recorder extends AbstractCameraApplication{

protected FrameRecorder recorder;

@Override

protected void initOutput() throws Exception {

// create the FFmpegFrameRecorder

recorder = new FFmpegFrameRecorder("CameraMp4Recorder.mp4", // where to store the file

getCameraImageWidth(), // width, same as the video source

getCameraImageHeight(), // height, same as the video source

0); // number of audio channels, 0 means none

// container format

recorder.setFormat("mp4");

// frame rate, same as the grabber

recorder.setFrameRate(getFrameRate());

// pixel format

recorder.setPixelFormat(AV_PIX_FMT_YUV420P);

// codec

recorder.setVideoCodec(avcodec.AV_CODEC_ID_MPEG4);

// video quality, 0 means lossless

recorder.setVideoQuality(0);

// initialize the recorder

recorder.start();

}

@Override

protected void output(Frame frame) throws Exception {

recorder.record(frame);

}

@Override

protected void releaseOutputResource() throws Exception {

recorder.close();

}

public static void main(String[] args) {

new CameraMp4Recorder().action(10);

}

}

5.7.2 Capturing snapshots from the camera

package cv;

import lombok.extern.slf4j.Slf4j;

import org.bytedeco.javacv.Frame;

import org.bytedeco.javacv.Java2DFrameConverter;

import javax.imageio.ImageIO;

import java.awt.image.BufferedImage;

import java.io.FileOutputStream;

/**

* TODO

*

* @author majun

* @version 1.0

* @since 2023-10-11 22:35

*/

@Slf4j

public class CameraImageGraber extends AbstractCameraApplication{

private Java2DFrameConverter converter = new Java2DFrameConverter();

@Override

protected void initOutput() throws Exception {

}

@Override

protected void output(Frame frame) throws Exception {

// convert the frame to a BufferedImage

BufferedImage bufferedImage = converter.getBufferedImage(frame);

ImageIO.write(bufferedImage, "jpg", new FileOutputStream(System.currentTimeMillis()+".jpg"));

}

@Override

protected void releaseOutputResource() throws Exception {

}

@Override

protected int getInterval() {

// capture one frame per second

return 1000;

}

public static void main(String[] args) {

// capture snapshots continuously for ten seconds

new CameraImageGraber().action(10);

}

}

5.7.3 Pushing the camera stream

Similar to the earlier example of pushing a local mp4 to SRS.

package cv;

import org.bytedeco.ffmpeg.global.avcodec;

import org.bytedeco.javacv.Frame;

import org.bytedeco.javacv.FrameRecorder;

/**

* TODO

*

* @author majun

* @version 1.0

* @since 2023-10-11 22:52

*/

public class CameraPushSRS extends AbstractCameraApplication{

private static final String RECORD_ADDRESS = "rtmp://192.168.72.126:1935/hls/camera";

protected FrameRecorder recorder;

protected long startRecordTime = 0L;

@Override

protected void initOutput() throws Exception {

// create the frame recorder with the SRS push address

recorder = FrameRecorder.createDefault(RECORD_ADDRESS, getCameraImageWidth(), getCameraImageHeight());

// reduce startup latency, see

// https://trac.ffmpeg.org/wiki/StreamingGuide

recorder.setVideoOption("tune", "zerolatency");

// choose a trade-off between video quality and encoding speed from these presets:

// ultrafast, superfast, veryfast, faster, fast, medium, slow, slower, veryslow

// ultrafast offers us the least amount of compression (lower encoder

// CPU) at the cost of a larger stream size

// at the other end, veryslow provides the best compression (high

// encoder CPU) while lowering the stream size

// (see: https://trac.ffmpeg.org/wiki/Encode/H.264)

// ultrafast uses the least CPU

recorder.setVideoOption("preset", "ultrafast");

// Constant Rate Factor (see: https://trac.ffmpeg.org/wiki/Encode/H.264)

recorder.setVideoOption("crf", "28");

// 2000 kb/s, reasonable "sane" area for 720

recorder.setVideoBitrate(2000000);

// video codec

recorder.setVideoCodec(avcodec.AV_CODEC_ID_H264);

// container format

recorder.setFormat("flv");

// FPS (frames per second)

recorder.setFrameRate(getFrameRate());

// Key frame interval, in our case every 2 seconds -> 30 (fps) * 2 = 60

recorder.setGopSize((int)getFrameRate()*2);

// initialize the frame recorder

recorder.start();

}

@Override

protected void output(Frame frame) throws Exception {

if (0L==startRecordTime) {

startRecordTime = System.currentTimeMillis();

}

recorder.setTimestamp(1000 * (System.currentTimeMillis()-startRecordTime));

recorder.record(frame);

}

@Override

protected void releaseOutputResource() throws Exception {

recorder.close();

}

@Override

protected int getInterval() {

// when pushing a stream, use a shorter interval between frames than for local preview

return super.getInterval()/4;

}

public static void main(String[] args) {

new CameraPushSRS().action(10);

}

}

5.8 Face recognition training and prediction

A common scenario is a company door-access system: JavaCV trains a model on photos of employees' faces, and when the camera captures a face the model predicts whether it belongs to an employee. The demo project's pom.xml is shown below.

<?xml version="1.0" encoding="UTF-8"?>
<project xmlns="http://maven.apache.org/POM/4.0.0"
         xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
         xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 https://maven.apache.org/xsd/maven-4.0.0.xsd">
    <modelVersion>4.0.0</modelVersion>
    <parent>
        <groupId>org.springframework.boot</groupId>
        <artifactId>spring-boot-starter-parent</artifactId>
        <version>2.7.9</version>
    </parent>
    <groupId>com.example</groupId>
    <artifactId>demo</artifactId>
    <version>0.0.1-SNAPSHOT</version>
    <name>demo</name>
    <description>demo</description>
    <properties>
        <java.version>17</java.version>
    </properties>
    <dependencies>
        <dependency>
            <groupId>org.springframework.boot</groupId>
            <artifactId>spring-boot-starter</artifactId>
        </dependency>
        <dependency>
            <groupId>org.projectlombok</groupId>
            <artifactId>lombok</artifactId>
        </dependency>
        <dependency>
            <groupId>org.springframework.boot</groupId>
            <artifactId>spring-boot-starter-test</artifactId>
            <scope>test</scope>
        </dependency>
        <dependency>
            <groupId>org.bytedeco</groupId>
            <artifactId>javacv-platform</artifactId>
            <version>1.5.9</version>
        </dependency>
    </dependencies>
    <build>
        <plugins>
            <plugin>
                <groupId>org.springframework.boot</groupId>
                <artifactId>spring-boot-maven-plugin</artifactId>
            </plugin>
        </plugins>
    </build>
</project>

5.8.1 Training a face recognition model with JavaCV

package cv;

import lombok.SneakyThrows;

import org.bytedeco.opencv.global.opencv_imgcodecs;

import org.bytedeco.opencv.opencv_core.Mat;

import org.bytedeco.opencv.opencv_core.MatVector;

import org.bytedeco.opencv.opencv_core.Size;

import org.bytedeco.opencv.opencv_face.FisherFaceRecognizer;

import java.nio.IntBuffer;

import java.nio.file.Files;

import java.nio.file.Paths;

import java.util.Arrays;

import java.util.concurrent.atomic.AtomicInteger;

import static org.bytedeco.opencv.global.opencv_core.CV_32SC1;

import static org.bytedeco.opencv.global.opencv_imgproc.*;

public class Training {

@SneakyThrows

public static void main(String[] args) {

// 30 pictures of Andy Lau (刘德华) found online are stored in D:\\1 and 30 of Liu Yifei (刘亦菲) in D:\\2; use reasonably good-quality images, gather more if possible, or use an open-source face detection/recognition dataset directly

int imageNum = 60;

// holds the 60 image matrices

MatVector images = new MatVector(imageNum);

Mat lables = new Mat(imageNum, 1, CV_32SC1);

IntBuffer lablesBuf = lables.createBuffer();

AtomicInteger counter = new AtomicInteger(0);

// read the image matrices from the two folders, resize them and convert to grayscale; the folder name is used as the training label

for (String dir : Arrays.asList("D:\\1", "D:\\2")) {

Files.list(Paths.get(dir)).map(path -> opencv_imgcodecs.imread(path.toFile().getAbsolutePath(), 1)).forEachOrdered(

mat -> {

Mat resizedMat = new Mat();

resize(mat, resizedMat, new Size(300, 400)); // resize; images saved from Baidu image search are roughly 300x400

Mat grayMat = new Mat();

cvtColor(resizedMat, grayMat, COLOR_RGB2GRAY); // convert to grayscale

int currentIndex = counter.getAndIncrement();

images.put(currentIndex, grayMat);

lablesBuf.put(currentIndex, Integer.parseInt(dir.substring(dir.length() - 1)));

});

}

// create the face recognizer; besides Fisher there are also Eigen and LBPH (see the sketch after this class)

FisherFaceRecognizer fr = FisherFaceRecognizer.create();

// train the face model

fr.train(images, lables);

// save the trained model

fr.save("faceRecognize.xml");

fr.close();

}

}
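
The comment in the training code mentions that Fisher, Eigen and LBPH recognizers are all available. They share the FaceRecognizer API, so switching is a small change; below is a hedged sketch (the class name, method name and output file name are made up for illustration) using LBPH, which is generally more tolerant of lighting changes and, unlike Fisher, also works with a single class. Note that LBPH confidence values are on a different scale, so the threshold used in 5.8.2 would need re-tuning.

```java
import org.bytedeco.opencv.opencv_core.Mat;
import org.bytedeco.opencv.opencv_core.MatVector;
import org.bytedeco.opencv.opencv_face.FaceRecognizer;
import org.bytedeco.opencv.opencv_face.LBPHFaceRecognizer;

public class TrainingLbph {

    // Drop-in alternative to the FisherFaceRecognizer used above: same train/save API,
    // `images` and `labels` are prepared exactly as in the Training class.
    static void trainAndSave(MatVector images, Mat labels) {
        FaceRecognizer recognizer = LBPHFaceRecognizer.create();
        recognizer.train(images, labels);
        recognizer.save("faceRecognizeLBPH.xml");
        recognizer.close();
    }
}
```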

5.8.2 Predicting a face photo with the model

package cv;

import lombok.SneakyThrows;

import org.bytedeco.javacpp.DoublePointer;

import org.bytedeco.javacpp.IntPointer;

import org.bytedeco.javacv.CanvasFrame;

import org.bytedeco.javacv.Frame;

import org.bytedeco.javacv.Java2DFrameConverter;

import org.bytedeco.javacv.OpenCVFrameConverter;

import org.bytedeco.opencv.opencv_core.*;

import org.bytedeco.opencv.opencv_face.FisherFaceRecognizer;

import org.bytedeco.opencv.opencv_objdetect.CascadeClassifier;

import javax.imageio.ImageIO;

import javax.swing.*;

import java.awt.image.BufferedImage;

import java.io.File;

import java.nio.file.Files;

import java.nio.file.Paths;

import java.util.List;

import java.util.Random;

import java.util.stream.Collectors;

import static org.bytedeco.opencv.global.opencv_imgproc.*;

public class Inference {

@SneakyThrows

public static void main(String[] args) {

// load the model

FisherFaceRecognizer faceRecognizer = FisherFaceRecognizer.create();

faceRecognizer.read("faceRecognize.xml");

// a face is only accepted as one of the known labels (1 or 2 here) if its distance to the model is below this threshold

faceRecognizer.setThreshold(1300.0);

// create a preview window

CanvasFrame canvas = new CanvasFrame("Face recognition");

canvas.setDefaultCloseOperation(JFrame.EXIT_ON_CLOSE);

OpenCVFrameConverter.ToMat convertor = new OpenCVFrameConverter.ToMat(); // used for type conversion

while (canvas.isEnabled()) {

Mat grayImage = new Mat();

Mat face = new Mat();

List<String> toTests = Files.list(Paths.get("D:\\2")).map(path -> path.toFile().getAbsolutePath()).collect(Collectors.toList());

File file = new File(toTests.get(new Random().nextInt(toTests.size())));

BufferedImage image = ImageIO.read(file);

Java2DFrameConverter imageConverter = new Java2DFrameConverter();

Frame imgFrame = imageConverter.convert(image);

// type conversion

OpenCVFrameConverter.ToMat converter = new OpenCVFrameConverter.ToMat();

Mat scr = converter.convertToMat(imgFrame);

cvtColor(scr, grayImage, COLOR_RGB2GRAY); // the source image is in color, so convert to grayscale first

// load the OpenCV face detector; change my path to your own

CascadeClassifier cascade = new CascadeClassifier(

"E:\\opencv\\sources\\data\\haarcascades\\haarcascade_frontalface_alt.xml");

// detect faces

RectVector faces = new RectVector();

cascade.detectMultiScale(grayImage, faces);

IntPointer label = new IntPointer(1);

DoublePointer confidence = new DoublePointer(1);

// recognize the faces; one image may contain several

for (int i = 0; i < faces.size(); i++) {

Rect rect = faces.get(i);

rectangle(scr, rect, new Scalar(0, 255, 0, 1));

// grayscale region inside the detected rectangle

Mat grayImageWithRectangle = new Mat(grayImage, rect);

resize(grayImageWithRectangle, face, new Size(300, 400)); // same size as used when training the model

faceRecognizer.predict(face, label, confidence);

int predictedLabel = label.get(0); // prediction result

System.out.println(predictedLabel);

System.gc(); // memory usage climbs quickly otherwise

// interpret the prediction result

int pos_x = Math.max(rect.tl().x() - 10, 0);

int pos_y = Math.max(rect.tl().y() - 10, 0);

putText(scr, predictedLabel == 1 ? "LDF" : predictedLabel == 2 ? "LYF" : "Unknown", new Point(pos_x, pos_y), FONT_HERSHEY_PLAIN, 1.0, new Scalar(0, 255, 0, 2.0));

}

// display

Frame frame = convertor.convert(scr);

canvas.showImage(frame); // show the image with the rectangles and label text in the window

Thread.sleep(100); // refresh the image every 100 ms

}

}

}

5.8.3 Replacing image-file input with camera frames for live face detection and recognition

OpenCVFrameGrabber grabber = new OpenCVFrameGrabber(0);

grabber.setImageWidth(300);

grabber.setImageHeight(400);

grabber.start();

Frame frame=grabber.grab();
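
Putting the pieces together, the following is a rough, untested sketch that combines the grabber above with the detection and prediction logic from 5.8.2; the model path, cascade path, threshold and the label-to-name mapping are assumptions carried over from the earlier sections:

```java
import org.bytedeco.javacpp.DoublePointer;
import org.bytedeco.javacpp.IntPointer;
import org.bytedeco.javacv.CanvasFrame;
import org.bytedeco.javacv.Frame;
import org.bytedeco.javacv.OpenCVFrameConverter;
import org.bytedeco.javacv.OpenCVFrameGrabber;
import org.bytedeco.opencv.opencv_core.*;
import org.bytedeco.opencv.opencv_face.FisherFaceRecognizer;
import org.bytedeco.opencv.opencv_objdetect.CascadeClassifier;

import javax.swing.JFrame;

import static org.bytedeco.opencv.global.opencv_imgproc.*;

public class CameraFaceRecognizer {

    public static void main(String[] args) throws Exception {
        // Model and cascade paths follow the earlier sections; adjust to your own files.
        FisherFaceRecognizer recognizer = FisherFaceRecognizer.create();
        recognizer.read("faceRecognize.xml");
        recognizer.setThreshold(1300.0);
        CascadeClassifier cascade = new CascadeClassifier(
                "E:\\opencv\\sources\\data\\haarcascades\\haarcascade_frontalface_alt.xml");

        OpenCVFrameGrabber grabber = new OpenCVFrameGrabber(0);
        grabber.start();

        CanvasFrame canvas = new CanvasFrame("Camera face recognition");
        canvas.setDefaultCloseOperation(JFrame.EXIT_ON_CLOSE);
        OpenCVFrameConverter.ToMat converter = new OpenCVFrameConverter.ToMat();

        Frame frame;
        while (canvas.isEnabled() && (frame = grabber.grab()) != null) {
            Mat color = converter.convertToMat(frame);
            if (color == null) {
                continue;
            }
            Mat gray = new Mat();
            cvtColor(color, gray, COLOR_RGB2GRAY);

            // Detect faces in the grayscale frame.
            RectVector faces = new RectVector();
            cascade.detectMultiScale(gray, faces);

            IntPointer label = new IntPointer(1);
            DoublePointer confidence = new DoublePointer(1);
            for (int i = 0; i < faces.size(); i++) {
                Rect rect = faces.get(i);
                rectangle(color, rect, new Scalar(0, 255, 0, 1));
                Mat face = new Mat();
                // Same 300x400 size as used when training the model.
                resize(new Mat(gray, rect), face, new Size(300, 400));
                recognizer.predict(face, label, confidence);
                int predicted = label.get(0);
                // Label-to-name mapping assumed from 5.8.1 (1 = Andy Lau, 2 = Liu Yifei).
                String name = predicted == 1 ? "LDH" : predicted == 2 ? "LYF" : "Unknown";
                putText(color, name, new Point(Math.max(rect.x() - 10, 0), Math.max(rect.y() - 10, 0)),
                        FONT_HERSHEY_PLAIN, 1.0, new Scalar(0, 255, 0, 2.0));
            }
            canvas.showImage(converter.convert(color));
            Thread.sleep(100);
        }
        grabber.close();
    }
}
```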
