From 4452a13ef7b5e991841112b96b3a21b45a2ede63 Mon Sep 17 00:00:00 2001 From: Daniel <1367240116@qq.com> Date: Thu, 19 Dec 2024 15:45:24 +0800 Subject: [PATCH 1/4] add SaverCppNodes for spirecv2-dds, and fix some issues. --- ...mera_calib_1024x768_params_2024-12-08.json | 2 +- spirecv/calib/CameraCalibrationNode.py | 4 +- .../CameraCppNodes/src/sv2_camera_read.cpp | 4 +- spirecv/saver/SaverCppNodes/CMakeLists.txt | 116 ++++ spirecv/saver/SaverCppNodes/SV2SaverDemo.cpp | 30 + .../saver/SaverCppNodes/build_on_jetson.sh | 9 + .../saver/SaverCppNodes/build_on_x86_cuda.sh | 9 + .../saver/SaverCppNodes/build_on_x86_intel.sh | 9 + .../saver/SaverCppNodes/include/sv2_saver.h | 80 +++ .../SaverCppNodes/include/sv2_video_base.h | 57 ++ .../SaverCppNodes/include/sv2_video_saver.h | 34 + .../src/ffmpeg/x86_cuda/bs_common.h | 49 ++ .../src/ffmpeg/x86_cuda/bs_video_saver.cpp | 392 +++++++++++ .../src/ffmpeg/x86_cuda/bs_video_saver.h | 90 +++ .../src/ffmpeg/x86_intel/bs_common.h | 50 ++ .../src/ffmpeg/x86_intel/bs_video_saver.cpp | 655 ++++++++++++++++++ .../src/ffmpeg/x86_intel/bs_video_saver.h | 101 +++ .../src/gstreamer/writer_gstreamer_impl.cpp | 65 ++ .../src/gstreamer/writer_gstreamer_impl.h | 34 + spirecv/saver/SaverCppNodes/src/sv2_saver.cpp | 36 + .../SaverCppNodes/src/sv2_video_base.cpp | 256 +++++++ .../SaverCppNodes/src/sv2_video_saver.cpp | 80 +++ 22 files changed, 2156 insertions(+), 6 deletions(-) create mode 100644 spirecv/saver/SaverCppNodes/CMakeLists.txt create mode 100644 spirecv/saver/SaverCppNodes/SV2SaverDemo.cpp create mode 100755 spirecv/saver/SaverCppNodes/build_on_jetson.sh create mode 100755 spirecv/saver/SaverCppNodes/build_on_x86_cuda.sh create mode 100755 spirecv/saver/SaverCppNodes/build_on_x86_intel.sh create mode 100644 spirecv/saver/SaverCppNodes/include/sv2_saver.h create mode 100644 spirecv/saver/SaverCppNodes/include/sv2_video_base.h create mode 100644 spirecv/saver/SaverCppNodes/include/sv2_video_saver.h create mode 100644 
spirecv/saver/SaverCppNodes/src/ffmpeg/x86_cuda/bs_common.h create mode 100644 spirecv/saver/SaverCppNodes/src/ffmpeg/x86_cuda/bs_video_saver.cpp create mode 100644 spirecv/saver/SaverCppNodes/src/ffmpeg/x86_cuda/bs_video_saver.h create mode 100644 spirecv/saver/SaverCppNodes/src/ffmpeg/x86_intel/bs_common.h create mode 100644 spirecv/saver/SaverCppNodes/src/ffmpeg/x86_intel/bs_video_saver.cpp create mode 100644 spirecv/saver/SaverCppNodes/src/ffmpeg/x86_intel/bs_video_saver.h create mode 100644 spirecv/saver/SaverCppNodes/src/gstreamer/writer_gstreamer_impl.cpp create mode 100644 spirecv/saver/SaverCppNodes/src/gstreamer/writer_gstreamer_impl.h create mode 100644 spirecv/saver/SaverCppNodes/src/sv2_saver.cpp create mode 100644 spirecv/saver/SaverCppNodes/src/sv2_video_base.cpp create mode 100644 spirecv/saver/SaverCppNodes/src/sv2_video_saver.cpp diff --git a/params/spirecv2/camera_calib_1024x768_params_2024-12-08.json b/params/spirecv2/camera_calib_1024x768_params_2024-12-08.json index 4b00f8f..5878227 100644 --- a/params/spirecv2/camera_calib_1024x768_params_2024-12-08.json +++ b/params/spirecv2/camera_calib_1024x768_params_2024-12-08.json @@ -8,5 +8,5 @@ "/CalibrationNode/square_size": 24, "/CalibrationNode/imw": 1024, "/CalibrationNode/imh": 768, - "/CalibrationNode/outputFile": "calib.json" + "/CalibrationNode/output_file": "calib.json" } diff --git a/spirecv/calib/CameraCalibrationNode.py b/spirecv/calib/CameraCalibrationNode.py index 6033854..3db19f6 100644 --- a/spirecv/calib/CameraCalibrationNode.py +++ b/spirecv/calib/CameraCalibrationNode.py @@ -176,9 +176,7 @@ class CameraCalibrationNode(threading.Thread, BaseNode): self.queue_pool.append(self.job_queue) self.square_size = self.get_param('square_size', 10) - self.imw = self.get_param('imw', 640) - self.imh = self.get_param('imh', 480) - self.outputFile = self.get_param('outputFile', 'calib.json') + self.outputFile = self.get_param('output_file', 'calib.json') self.start() diff --git 
a/spirecv/dataloader/CameraCppNodes/src/sv2_camera_read.cpp b/spirecv/dataloader/CameraCppNodes/src/sv2_camera_read.cpp index 00211ec..7143240 100644 --- a/spirecv/dataloader/CameraCppNodes/src/sv2_camera_read.cpp +++ b/spirecv/dataloader/CameraCppNodes/src/sv2_camera_read.cpp @@ -49,8 +49,8 @@ void CameraCppNode::run() calib_msg["width"] = this->image_width; calib_msg["height"] = this->image_height; calib_msg["distortion_model"]= "plumb_bob"; - calib_msg["D"] = this->camera_matrix; - calib_msg["K"] = this->distortion_coefficients; + calib_msg["K"] = this->camera_matrix; + calib_msg["D"] = this->distortion_coefficients; calib_msg["R"] = this->rectification; calib_msg["P"] = this->projection; diff --git a/spirecv/saver/SaverCppNodes/CMakeLists.txt b/spirecv/saver/SaverCppNodes/CMakeLists.txt new file mode 100644 index 0000000..d7919b6 --- /dev/null +++ b/spirecv/saver/SaverCppNodes/CMakeLists.txt @@ -0,0 +1,116 @@ +cmake_minimum_required(VERSION 3.0 FATAL_ERROR) +cmake_policy(SET CMP0054 NEW) + +set(PROJECT_VERSION 0.2.0) +project(SV2SaverDemo VERSION ${PROJECT_VERSION} LANGUAGES CXX) + +add_definitions(-DAPI_EXPORTS) +set(CMAKE_BUILD_TYPE "Release") + + +## JETSON, X86_CUDA, X86_INTEL +message(STATUS "System:${CMAKE_HOST_SYSTEM_PROCESSOR}") +if(NOT DEFINED PLATFORM) + message(FATAL_ERROR "PLATFORM NOT SPECIFIED!") +else() + message(STATUS "PLATFORM: ${PLATFORM}") + if(PLATFORM STREQUAL "JETSON") + add_definitions(-DPLATFORM_JETSON) + option(USE_GSTREAMER "BUILD WITH GSTREAMER." ON) + elseif(PLATFORM STREQUAL "X86_CUDA") + add_definitions(-DPLATFORM_X86_CUDA) + option(USE_FFMPEG "BUILD WITH FFMPEG." ON) + elseif(PLATFORM STREQUAL "X86_INTEL") + add_definitions(-DPLATFORM_X86_INTEL) + option(USE_GSTREAMER "BUILD WITH GSTREAMER." 
ON) + else() + message(FATAL_ERROR "UNSUPPORTED PLATFORM!") + endif() +endif() + + +if(USE_GSTREAMER) + add_definitions(-DWITH_GSTREAMER) + message(STATUS "GSTREAMER: ON") +endif() + +if(USE_FFMPEG) + add_definitions(-DWITH_FFMPEG) + find_package(fmt REQUIRED) + set(FFMPEG_LIBS libavutil.so libavcodec.so libavformat.so libavdevice.so libavfilter.so libswscale.so) + message(STATUS "WITH_FFMPEG: ON") +endif() + +find_package(SpireMS REQUIRED) + +include_directories(${SpireMS_INCLUDE_DIRS}) +find_package(OpenCV 4 REQUIRED) +message(STATUS "OpenCV library status:") +message(STATUS " version: ${OpenCV_VERSION}") +message(STATUS " libraries: ${OpenCV_LIBS}") +message(STATUS " include path: ${OpenCV_INCLUDE_DIRS}") + + +include_directories(${CMAKE_CURRENT_SOURCE_DIR}/include) + + +if(USE_GSTREAMER) + include_directories(${CMAKE_CURRENT_SOURCE_DIR}/src/gstreamer) + if(CMAKE_HOST_SYSTEM_PROCESSOR MATCHES "aarch64") + include_directories( + "/usr/include/gstreamer-1.0" + "/usr/local/include/gstreamer-1.0" + "/usr/include/glib-2.0" + "/usr/lib/aarch64-linux-gnu/glib-2.0/include" + ) + elseif(CMAKE_HOST_SYSTEM_PROCESSOR MATCHES "x86_64") + include_directories( + "/usr/include/gstreamer-1.0" + "/usr/local/include/gstreamer-1.0" + "/usr/include/glib-2.0" + "/usr/lib/x86_64-linux-gnu/glib-2.0/include" + ) + endif() +endif() + + + + +# Public header +set( + public_HEADS + include/sv2_video_base.h + include/sv2_video_saver.h + include/sv2_saver.h + +) + +set( + SRCS + src/sv2_video_base.cpp + src/sv2_video_saver.cpp + src/sv2_saver.cpp +) + +if(USE_FFMPEG) + include_directories(${CMAKE_CURRENT_SOURCE_DIR}/src/ffmpeg/x86_cuda) + file(GLOB ALG_SRC_FILES ${CMAKE_CURRENT_SOURCE_DIR}/src/ffmpeg/x86_cuda/*.cpp) + list(APPEND SRCS ${ALG_SRC_FILES}) +endif() + +if(USE_GSTREAMER) +file(GLOB ALG_SRC_FILES ${CMAKE_CURRENT_SOURCE_DIR}/src/gstreamer/*.cpp) +list(APPEND SRCS ${ALG_SRC_FILES}) +endif() + + + +add_executable(SV2SaverDemo SV2SaverDemo.cpp ${SRCS}) 
+target_link_libraries(SV2SaverDemo ${OpenCV_LIBS} ${SpireMS_LIBS}) + + +install(TARGETS SV2SaverDemo + RUNTIME DESTINATION bin +) + + diff --git a/spirecv/saver/SaverCppNodes/SV2SaverDemo.cpp b/spirecv/saver/SaverCppNodes/SV2SaverDemo.cpp new file mode 100644 index 0000000..22ecd89 --- /dev/null +++ b/spirecv/saver/SaverCppNodes/SV2SaverDemo.cpp @@ -0,0 +1,31 @@ +#include +#include +// 包含SpireMS SDK头文件 +#include +#include "sv2_saver.h" + + +using namespace std; + + +int main(int argc, char *argv[]) +{ + std::string job_name = "live"; + std::string config = ""; + + if (argc < 2) + { + std::cout << "Please input SpireCV Config." << std::endl; + return -1; + } + config = argv[1]; + if (argc > 2) + { + job_name = argv[2]; + } + + sv2::SaverCppNode node(job_name, config); + node.start(); + node.join(); + return 0; +} diff --git a/spirecv/saver/SaverCppNodes/build_on_jetson.sh b/spirecv/saver/SaverCppNodes/build_on_jetson.sh new file mode 100755 index 0000000..65a5735 --- /dev/null +++ b/spirecv/saver/SaverCppNodes/build_on_jetson.sh @@ -0,0 +1,9 @@ +#!/bin/bash -e + +rm -rf build +mkdir build +cd build +cmake .. -DPLATFORM=JETSON +make -j4 +sudo make install + diff --git a/spirecv/saver/SaverCppNodes/build_on_x86_cuda.sh b/spirecv/saver/SaverCppNodes/build_on_x86_cuda.sh new file mode 100755 index 0000000..2b9fd36 --- /dev/null +++ b/spirecv/saver/SaverCppNodes/build_on_x86_cuda.sh @@ -0,0 +1,9 @@ +#!/bin/bash -e + +rm -rf build +mkdir build +cd build +cmake .. -DPLATFORM=X86_CUDA +make -j4 +sudo make install + diff --git a/spirecv/saver/SaverCppNodes/build_on_x86_intel.sh b/spirecv/saver/SaverCppNodes/build_on_x86_intel.sh new file mode 100755 index 0000000..d8132eb --- /dev/null +++ b/spirecv/saver/SaverCppNodes/build_on_x86_intel.sh @@ -0,0 +1,9 @@ +#!/bin/bash -e + +rm -rf build +mkdir build +cd build +cmake ..
-DPLATFORM=X86_INTEL +make -j4 +sudo make install + diff --git a/spirecv/saver/SaverCppNodes/include/sv2_saver.h b/spirecv/saver/SaverCppNodes/include/sv2_saver.h new file mode 100644 index 0000000..04d8d58 --- /dev/null +++ b/spirecv/saver/SaverCppNodes/include/sv2_saver.h @@ -0,0 +1,80 @@ +#ifndef __SV2_SAVER__ +#define __SV2_SAVER__ + +#include "sms_core.h" +#include +#include +#include +#include +#include +#include "sv2_video_saver.h" + + +namespace sv2 +{ + + class VideoWriter; + + class SaverCppNode : public sms::BaseNode + { + public: + SaverCppNode( + std::string job_name, + std::string param_file, + std::string ip = "127.0.0.1", + int port = 9094) : sms::BaseNode("SaverCppNode", job_name, param_file, ip, port), + _det_res_sub("/" + job_name + "/detector/image_results", "sensor_msgs::CompressedImage", std::bind(&SaverCppNode::full_res_callback, this, std::placeholders::_1)) + { + // 读取节点参数 + this->_image_width = this->get_param("image_width", 640); + this->_image_height = this->get_param("image_height", 480); + this->_fps = this->get_param("fps", 25); + this->_saver_dir = this->get_param("saver_dir", "/home/amov"); + + + + // 默认设置保存路径"/home/amov/Videos",保存图像尺寸(640,480),帧频25Hz,同步保存检测结果(.svj) + this->_vw.setup(this->_saver_dir, cv::Size(this->_image_width, this->_image_height), this->_fps, true); + + + } + ~SaverCppNode() + { + } + void run(); + + void full_res_callback(nlohmann::json msg) + { + // 放入到阻塞队列中 + { + std::unique_lock lock(this->_full_queue_mtx); + this->_full_queue.push(msg); + } + // 通知主线程取数据 + this->_full_cv.notify_one(); + } + + int _image_width; + int _image_height; + int _fps; + std::string _saver_dir; + + + + + VideoWriter _vw; + + private: + // 订阅话题 + sms::Subscriber _det_res_sub; + + std::mutex _full_queue_mtx; + std::queue _full_queue; + std::condition_variable _full_cv; + + + +}; + +} +#endif diff --git a/spirecv/saver/SaverCppNodes/include/sv2_video_base.h b/spirecv/saver/SaverCppNodes/include/sv2_video_base.h new file mode 100644 
index 0000000..5707292 --- /dev/null +++ b/spirecv/saver/SaverCppNodes/include/sv2_video_base.h @@ -0,0 +1,57 @@ +#ifndef __SV2_VIDEO_BASE__ +#define __SV2_VIDEO_BASE__ + +#include +#include +#include +#include +#include +#include +#include +#include "sms_core.h" +// #define X86_PLATFORM +// #define JETSON_PLATFORM + + +namespace sv2 { + + +class VideoWriterBase { +public: + VideoWriterBase(); + ~VideoWriterBase(); + + void setup(std::string file_path, cv::Size size, double fps=25.0, bool with_targets=false); + void write(cv::Mat image, nlohmann::json tgts_json); + void release(); + + cv::Size getSize(); + double getFps(); + std::string getFilePath(); + bool isRunning(); +protected: + virtual bool setupImpl(std::string file_name_); + virtual bool isOpenedImpl(); + virtual void writeImpl(cv::Mat img_); + virtual void releaseImpl(); + void _init(); + void _run(); + + bool _is_running; + cv::Size _image_size; + double _fps; + bool _with_targets; + int _fid; + int _fcnt; + + std::thread _tt; + // cv::VideoWriter _writer; + std::ofstream _targets_ofs; + std::string _file_path; + + std::queue _image_to_write; + std::queue _tgts_to_write; +}; + +} +#endif diff --git a/spirecv/saver/SaverCppNodes/include/sv2_video_saver.h b/spirecv/saver/SaverCppNodes/include/sv2_video_saver.h new file mode 100644 index 0000000..b1c42eb --- /dev/null +++ b/spirecv/saver/SaverCppNodes/include/sv2_video_saver.h @@ -0,0 +1,34 @@ +#ifndef __SV2_VIDEO_SAVER__ +#define __SV2_VIDEO_SAVER__ + +#include +#include +#include +#include +#include +#include "sv2_video_base.h" + +class BsVideoSaver; + +namespace sv2 +{ + class VideoWriterGstreamerImpl; + + class VideoWriter : public VideoWriterBase + { + public: + VideoWriter(); + ~VideoWriter(); + + protected: + bool setupImpl(std::string file_name_); + bool isOpenedImpl(); + void writeImpl(cv::Mat img_); + void releaseImpl(); + + VideoWriterGstreamerImpl *_gstreamer_impl; + BsVideoSaver *_ffmpeg_impl; + }; + +} +#endif diff --git 
a/spirecv/saver/SaverCppNodes/src/ffmpeg/x86_cuda/bs_common.h b/spirecv/saver/SaverCppNodes/src/ffmpeg/x86_cuda/bs_common.h new file mode 100644 index 0000000..e52666a --- /dev/null +++ b/spirecv/saver/SaverCppNodes/src/ffmpeg/x86_cuda/bs_common.h @@ -0,0 +1,49 @@ +#pragma once +#include +#include +#include + +// 获取当前系统启动以来的毫秒数 +static int64_t getCurTime() +{ + // tv_sec (s) tv_nsec (ns-纳秒) + struct timespec now; + clock_gettime(CLOCK_MONOTONIC, &now); + return (now.tv_sec * 1000 + now.tv_nsec / 1000000); +} + + + +struct VideoFrame +{ +public: + enum VideoFrameType + { + BGR = 0, + YUV420P, + + }; + // VideoFrame(VideoFrameType type, int width, int height,int size) + VideoFrame(VideoFrameType type, int width, int height) + { + this->type = type; + this->width = width; + this->height = height; + this->size = width*height*3; + this->data = new uint8_t[this->size]; + } + ~VideoFrame() + { + delete[] this->data; + this->data = nullptr; + } + + VideoFrameType type; + int size; + int width; + int height; + uint8_t *data; +}; + + + diff --git a/spirecv/saver/SaverCppNodes/src/ffmpeg/x86_cuda/bs_video_saver.cpp b/spirecv/saver/SaverCppNodes/src/ffmpeg/x86_cuda/bs_video_saver.cpp new file mode 100644 index 0000000..37f74fb --- /dev/null +++ b/spirecv/saver/SaverCppNodes/src/ffmpeg/x86_cuda/bs_video_saver.cpp @@ -0,0 +1,392 @@ +#include "bs_video_saver.h" + +/* +amov_rtsp +53248e16cc899903cf296df468977c60d7d73aa7 +*/ + +// char av_error[AV_ERROR_MAX_STRING_SIZE] = { 0 }; +// #define av_err2str(errnum) av_make_error_string(av_error, AV_ERROR_MAX_STRING_SIZE, errnum) + +BsVideoSaver::BsVideoSaver() +{ + +} + +BsVideoSaver::~BsVideoSaver() +{ + +} + + +bool BsVideoSaver::setup(std::string name, int width, int height, int fps, std::string encoder, int bitrate = 4) +{ + // 重置状态然后初始化 + this->width = width; + this->height = height; + + // 线程停止 + if(mThread != nullptr) + { + this->stop(); + } + + // 编码器重置 + if (mVideoCodecCtx != NULL) + { + avcodec_free_context(&mVideoCodecCtx); + 
} + + + if (!this->init(name, width, height, fps, encoder, bitrate)) + { + std::cout << "BsVideoSaver::setup error\n"; + return false; + } + + + std::cout << "BsStreamer::setup Success!\n"; + start(); + return true; +} + +void BsVideoSaver::start() +{ + push_running = true; + mThread = new std::thread(BsVideoSaver::encodeVideoAndSaveThread, this); + mThread->native_handle(); +} + +void BsVideoSaver::stop() +{ + if (mThread != nullptr) + { + push_running = false; + mThread->join(); + mThread = nullptr; + } +} + +bool BsVideoSaver::init(std::string name, int width, int height, int fps, std::string encoder, int bitrate) +{ + // 初始化上下文 + if (avformat_alloc_output_context2(&mFmtCtx, NULL, NULL, name.c_str()) < 0) + { + std::cout << "avformat_alloc_output_context2 error\n"; + return false; + } + + // 初始化视频编码器 + // AVCodec *videoCodec = avcodec_find_encoder(AV_CODEC_ID_H264); + // AVCodec *videoCodec = avcodec_find_encoder_by_name("h264_nvenc"); + + AVCodec *videoCodec = avcodec_find_encoder_by_name(encoder.c_str()); + if (!videoCodec) + { + std::cout << fmt::format("Using encoder:[{}] error!\n", encoder); + videoCodec = avcodec_find_encoder(AV_CODEC_ID_H264); + + if (!videoCodec) + { + std::cout << "avcodec_alloc_context3 error"; + return false; + } + + std::cout << "Using default H264 encoder!\n"; + + } + + mVideoCodecCtx = avcodec_alloc_context3(videoCodec); + if (!mVideoCodecCtx) + { + std::cout << "avcodec_alloc_context3 error"; + return false; + } + + // 压缩视频bit位大小 300kB + int bit_rate = bitrate * 1024 * 1024 * 8; + + // CBR:Constant BitRate - 固定比特率 + mVideoCodecCtx->flags |= AV_CODEC_FLAG_QSCALE; + mVideoCodecCtx->bit_rate = bit_rate; + mVideoCodecCtx->rc_min_rate = bit_rate; + mVideoCodecCtx->rc_max_rate = bit_rate; + mVideoCodecCtx->bit_rate_tolerance = bit_rate; + + mVideoCodecCtx->codec_id = videoCodec->id; + // 不支持AV_PIX_FMT_BGR24直接进行编码 + mVideoCodecCtx->pix_fmt = AV_PIX_FMT_YUV420P; + mVideoCodecCtx->codec_type = AVMEDIA_TYPE_VIDEO; + mVideoCodecCtx->width =
width; + mVideoCodecCtx->height = height; + mVideoCodecCtx->time_base = {1, fps}; + mVideoCodecCtx->framerate = {fps, 1}; + mVideoCodecCtx->gop_size = 12; + mVideoCodecCtx->max_b_frames = 0; + mVideoCodecCtx->thread_count = 1; + + + AVDictionary *video_codec_options = NULL; + av_dict_set(&video_codec_options, "profile", "main", 0); + // av_dict_set(&video_codec_options, "preset", "superfast", 0); + av_dict_set(&video_codec_options, "tune", "fastdecode", 0); + + if (avcodec_open2(mVideoCodecCtx, videoCodec, &video_codec_options) < 0) + { + std::cout << "avcodec_open2 error\n"; + return false; + } + + mVideoStream = avformat_new_stream(mFmtCtx, videoCodec); + if (!mVideoStream) + { + std::cout << "avformat_new_stream error\n"; + return false; + } + mVideoStream->id = mFmtCtx->nb_streams - 1; + // stream的time_base参数非常重要,它表示将现实中的一秒钟分为多少个时间基, 在下面调用avformat_write_header时自动完成 + avcodec_parameters_from_context(mVideoStream->codecpar, mVideoCodecCtx); + mVideoIndex = mVideoStream->id; + + + // open output url + av_dump_format(mFmtCtx, 0, name.c_str(), 1); + if (!(mFmtCtx->oformat->flags & AVFMT_NOFILE)) + { + int ret = avio_open(&mFmtCtx->pb, name.c_str(), AVIO_FLAG_WRITE); + if ( ret < 0) + { + std::cout << fmt::format("avio_open error url: {}\n", name.c_str()); + // std::cout << fmt::format("ret = {} : {}\n", ret, av_err2str(ret)); + std::cout << fmt::format("ret = {}\n", ret); + return false; + } + } + + AVDictionary *fmt_options = NULL; + av_dict_set(&fmt_options, "bufsize", "1024", 0); + + + mFmtCtx->video_codec_id = mFmtCtx->oformat->video_codec; + mFmtCtx->audio_codec_id = mFmtCtx->oformat->audio_codec; + + // 调用该函数会将所有stream的time_base,自动设置一个值,通常是1/90000或1/1000,这表示一秒钟表示的时间基长度 + if (avformat_write_header(mFmtCtx, &fmt_options) < 0) + { + std::cout << "avformat_write_header error\n"; + return false; + } + + return true; +} + +void BsVideoSaver::encodeVideoAndSaveThread(void* arg) +{ + // PushExecutor *executor = (PushExecutor *)arg; + BsVideoSaver *mBsVideoSaver = 
(BsVideoSaver *)arg; + int width = mBsVideoSaver->width; + int height = mBsVideoSaver->height; + + // 未编码的视频帧(bgr格式) + VideoFrame *videoFrame = NULL; + // 未编码视频帧队列当前长度 + int videoFrameQSize = 0; + + AVFrame *frame_yuv420p = av_frame_alloc(); + frame_yuv420p->format = mBsVideoSaver->mVideoCodecCtx->pix_fmt; + frame_yuv420p->width = width; + frame_yuv420p->height = height; + + int frame_yuv420p_buff_size = av_image_get_buffer_size(AV_PIX_FMT_YUV420P, width, height, 1); + uint8_t *frame_yuv420p_buff = (uint8_t *)av_malloc(frame_yuv420p_buff_size); + av_image_fill_arrays( + frame_yuv420p->data, frame_yuv420p->linesize, + frame_yuv420p_buff, + AV_PIX_FMT_YUV420P, + width, height, 1); + + // 编码后的视频帧 + AVPacket *pkt = av_packet_alloc(); + int64_t encodeSuccessCount = 0; + int64_t frameCount = 0; + + int64_t t1 = 0; + int64_t t2 = 0; + int ret = -1; + + while (mBsVideoSaver->push_running) + { + if (mBsVideoSaver->getVideoFrame(videoFrame, videoFrameQSize)) + { + + // frame_bgr 转 frame_yuv420p + mBsVideoSaver->bgr24ToYuv420p(videoFrame->data, width, height, frame_yuv420p_buff); + + frame_yuv420p->pts = frame_yuv420p->pkt_dts = av_rescale_q_rnd( + frameCount, + mBsVideoSaver->mVideoCodecCtx->time_base, + mBsVideoSaver->mVideoStream->time_base, + (AVRounding)(AV_ROUND_NEAR_INF | AV_ROUND_PASS_MINMAX)); + + frame_yuv420p->pkt_duration = av_rescale_q_rnd( + 1, + mBsVideoSaver->mVideoCodecCtx->time_base, + mBsVideoSaver->mVideoStream->time_base, + (AVRounding)(AV_ROUND_NEAR_INF | AV_ROUND_PASS_MINMAX)); + + frame_yuv420p->pkt_pos = frameCount; + + t1 = getCurTime(); + ret = avcodec_send_frame(mBsVideoSaver->mVideoCodecCtx, frame_yuv420p); + if (ret >= 0) + { + ret = avcodec_receive_packet(mBsVideoSaver->mVideoCodecCtx, pkt); + if (ret >= 0) + { + t2 = getCurTime(); + encodeSuccessCount++; + + pkt->stream_index = mBsVideoSaver->mVideoIndex; + + pkt->pos = frameCount; + pkt->duration = frame_yuv420p->pkt_duration; + + ret = mBsVideoSaver->writePkt(pkt); + + if (ret < 0) + { + 
std::cout << fmt::format("writePkt : ret = {}\n", ret); + } + } + else + { + // std::cout << fmt::format("avcodec_receive_packet error : ret = {}\n", ret); + } + } + else + { + std::cout << fmt::format("avcodec_send_frame error : ret = {}\n", ret); + } + + frameCount++; + + // 释放资源 + delete videoFrame; + videoFrame = NULL; + } + else + { + std::this_thread::sleep_for(std::chrono::milliseconds(1)); + } + } + // std::cout << fmt::format("push_running is false!\n"); + // std::cout << fmt::format("end stream!\n"); + + //写文件尾 + av_write_trailer(mBsVideoSaver->mFmtCtx); + + av_packet_free(&pkt); + pkt = NULL; + + av_free(frame_yuv420p_buff); + frame_yuv420p_buff = NULL; + + av_frame_free(&frame_yuv420p); + // av_frame_unref(frame_yuv420p); + frame_yuv420p = NULL; + +} + +int BsVideoSaver::writePkt(AVPacket* pkt) { + mWritePkt_mtx.lock(); + int ret = av_write_frame(mFmtCtx, pkt); + mWritePkt_mtx.unlock(); + + return ret; + +} + +bool BsVideoSaver::getVideoFrame(VideoFrame *&frame, int &frameQSize) +{ + + mRGB_VideoFrameQ_mtx.lock(); + + if (!mRGB_VideoFrameQ.empty()) + { + frame = mRGB_VideoFrameQ.front(); + mRGB_VideoFrameQ.pop(); + frameQSize = mRGB_VideoFrameQ.size(); + mRGB_VideoFrameQ_mtx.unlock(); + return true; + } + else + { + frameQSize = 0; + mRGB_VideoFrameQ_mtx.unlock(); + return false; + } +} + +void BsVideoSaver::write(cv::Mat& image) +{ + + int size = image.cols * image.rows * image.channels(); + VideoFrame* frame = new VideoFrame(VideoFrame::BGR, image.cols, image.rows); + memcpy(frame->data, image.data, size); + + mRGB_VideoFrameQ_mtx.lock(); + mRGB_VideoFrameQ.push(frame); + mRGB_VideoFrameQ_mtx.unlock(); +} + +bool BsVideoSaver::videoFrameQisEmpty() +{ + return mRGB_VideoFrameQ.empty(); +} + +unsigned char BsVideoSaver::clipValue(unsigned char x, unsigned char min_val, unsigned char max_val) +{ + if (x > max_val) { return max_val; } + else if (x < min_val) { return min_val; } + else { return x; } +} + +bool BsVideoSaver::bgr24ToYuv420p(unsigned char
*bgrBuf, int w, int h, unsigned char *yuvBuf) +{ + + unsigned char *ptrY, *ptrU, *ptrV, *ptrRGB; + memset(yuvBuf, 0, w * h * 3 / 2); + ptrY = yuvBuf; + ptrU = yuvBuf + w * h; + ptrV = ptrU + (w * h * 1 / 4); + unsigned char y, u, v, r, g, b; + + for (int j = 0; j < h; ++j) + { + + ptrRGB = bgrBuf + w * j * 3; + for (int i = 0; i < w; i++) + { + b = *(ptrRGB++); + g = *(ptrRGB++); + r = *(ptrRGB++); + + y = (unsigned char)((66 * r + 129 * g + 25 * b + 128) >> 8) + 16; + u = (unsigned char)((-38 * r - 74 * g + 112 * b + 128) >> 8) + 128; + v = (unsigned char)((112 * r - 94 * g - 18 * b + 128) >> 8) + 128; + *(ptrY++) = clipValue(y, 0, 255); + if (j % 2 == 0 && i % 2 == 0) + { + *(ptrU++) = clipValue(u, 0, 255); + } + else + { + if (i % 2 == 0) + { + *(ptrV++) = clipValue(v, 0, 255); + } + } + } + } + return true; +} diff --git a/spirecv/saver/SaverCppNodes/src/ffmpeg/x86_cuda/bs_video_saver.h b/spirecv/saver/SaverCppNodes/src/ffmpeg/x86_cuda/bs_video_saver.h new file mode 100644 index 0000000..223c22a --- /dev/null +++ b/spirecv/saver/SaverCppNodes/src/ffmpeg/x86_cuda/bs_video_saver.h @@ -0,0 +1,90 @@ +#pragma once + +#include +#include +#include + + +#include +// #include + +#include +#include +extern "C" +{ +#include +#include +#include +#include +// #include +#include +} + +#include + +#include "bs_common.h" + + +class BsVideoSaver +{ +public: + BsVideoSaver(); + ~BsVideoSaver(); + + // 用于初始化视频推流,仅调用一次 + bool setup(std::string name, int width, int height, int fps, std::string encoder, int bitrate); + // 推流一帧图像,在循环中被调用 + void write(cv::Mat& image); + + + // 连接流媒体服务器 + bool init(std::string name, int width, int height, int fps, std::string encoder, int bitrate); + void start(); + void stop(); + + // 编码视频帧并推流 + static void encodeVideoAndSaveThread(void* arg); + + bool videoFrameQisEmpty(); + + int writePkt(AVPacket *pkt); + + + // 上下文 + AVFormatContext *mFmtCtx = nullptr; + // 视频帧 + AVCodecContext *mVideoCodecCtx = NULL; + AVStream *mVideoStream = NULL; + + + int 
mVideoIndex = -1; + + +private: + + // 从mRGB_VideoFrameQ里面获取RGBframe + bool getVideoFrame(VideoFrame *&frame, int &frameQSize); + + + // bgr24转yuv420p + unsigned char clipValue(unsigned char x, unsigned char min_val, unsigned char max_val); + bool bgr24ToYuv420p(unsigned char *bgrBuf, int w, int h, unsigned char *yuvBuf); + + int width = -1; + int height = -1; + + + bool push_running = false; + bool nd_push_frame = false; + + // 视频帧 + std::queue mRGB_VideoFrameQ; + std::mutex mRGB_VideoFrameQ_mtx; + + + // 推流锁 + std::mutex mWritePkt_mtx; + std::thread* mThread = nullptr; + + +}; \ No newline at end of file diff --git a/spirecv/saver/SaverCppNodes/src/ffmpeg/x86_intel/bs_common.h b/spirecv/saver/SaverCppNodes/src/ffmpeg/x86_intel/bs_common.h new file mode 100644 index 0000000..5655774 --- /dev/null +++ b/spirecv/saver/SaverCppNodes/src/ffmpeg/x86_intel/bs_common.h @@ -0,0 +1,50 @@ +#pragma once +#include +#include +#include + +// 获取当前系统启动以来的毫秒数 +static int64_t getCurTime() +{ + // tv_sec (s) tv_nsec (ns-纳秒) + struct timespec now; + clock_gettime(CLOCK_MONOTONIC, &now); + return (now.tv_sec * 1000 + now.tv_nsec / 1000000); +} + + + +struct VideoFrame +{ +public: + enum VideoFrameType + { + BGR = 0, + RGB , + YUV420P, + + }; + // VideoFrame(VideoFrameType type, int width, int height,int size) + VideoFrame(VideoFrameType type, int width, int height) + { + this->type = type; + this->width = width; + this->height = height; + this->size = width*height*3; + this->data = new uint8_t[this->size]; + } + ~VideoFrame() + { + delete[] this->data; + this->data = nullptr; + } + + VideoFrameType type; + int size; + int width; + int height; + uint8_t *data; +}; + + + diff --git a/spirecv/saver/SaverCppNodes/src/ffmpeg/x86_intel/bs_video_saver.cpp b/spirecv/saver/SaverCppNodes/src/ffmpeg/x86_intel/bs_video_saver.cpp new file mode 100644 index 0000000..fed6737 --- /dev/null +++ b/spirecv/saver/SaverCppNodes/src/ffmpeg/x86_intel/bs_video_saver.cpp @@ -0,0 +1,655 @@ +#include 
"bs_video_saver.h" + +/* +amov_rtsp +53248e16cc899903cf296df468977c60d7d73aa7 +*/ +BsVideoSaver::BsVideoSaver() +{ +} + +BsVideoSaver::~BsVideoSaver() +{ +} + +static int set_hwframe_ctx(AVCodecContext *ctx, AVBufferRef *hw_device_ctx, int width, int height) +{ + AVBufferRef *hw_frames_ref; + AVHWFramesContext *frames_ctx = NULL; + int err = 0; + + if (!(hw_frames_ref = av_hwframe_ctx_alloc(hw_device_ctx))) + { + fprintf(stderr, "Failed to create VAAPI frame context.\n"); + return -1; + } + frames_ctx = (AVHWFramesContext *)(hw_frames_ref->data); + frames_ctx->format = AV_PIX_FMT_VAAPI; + frames_ctx->sw_format = AV_PIX_FMT_NV12; + frames_ctx->width = width; + frames_ctx->height = height; + frames_ctx->initial_pool_size = 20; + if ((err = av_hwframe_ctx_init(hw_frames_ref)) < 0) + { + // fprintf(stderr, "Failed to initialize VAAPI frame context." + // "Error code: %s\n",av_err2str(err)); + av_buffer_unref(&hw_frames_ref); + return err; + } + ctx->hw_frames_ctx = av_buffer_ref(hw_frames_ref); + if (!ctx->hw_frames_ctx) + err = AVERROR(ENOMEM); + + av_buffer_unref(&hw_frames_ref); + return err; +} + +bool BsVideoSaver::setup(std::string name, int width, int height, int fps, std::string encoder, int bitrate = 4) +{ + // 重置状态然后初始化 + this->width = width; + this->height = height; + + // 线程停止 + if (mThread != nullptr) + { + this->stop(); + } + + // 编码器重置 + if (mVideoCodecCtx != NULL) + { + avcodec_free_context(&mVideoCodecCtx); + } + + if (!this->init(name, width, height, fps, encoder, bitrate)) + { + std::cout << "BsVideoSaver::setup error\n"; + return false; + } + + std::cout << "BsStreamer::setup Success!\n"; + start(); + return true; +} + +void BsVideoSaver::start() +{ + push_running = true; + mThread = new std::thread(BsVideoSaver::encodeVideoAndSaveThread, this); + mThread->native_handle(); +} + +void BsVideoSaver::stop() +{ + if (mThread != nullptr) + { + push_running = false; + mThread->join(); + mThread = nullptr; + } +} + +bool BsVideoSaver::init(std::string
name, int width, int height, int fps, std::string encoder, int bitrate) +{ + // 初始化上下文 + if (avformat_alloc_output_context2(&mFmtCtx, NULL, NULL, name.c_str()) < 0) + { + std::cout << "avformat_alloc_output_context2 error\n"; + return false; + } + + // 初始化视频编码器 + // AVCodec *videoCodec = avcodec_find_encoder(AV_CODEC_ID_H264); + // AVCodec *videoCodec = avcodec_find_encoder_by_name("h264_nvenc"); + err = av_hwdevice_ctx_create(&hw_device_ctx, AV_HWDEVICE_TYPE_VAAPI, + NULL, NULL, 0); + + AVCodec *videoCodec = avcodec_find_encoder_by_name(encoder.c_str()); + if (!videoCodec) + { + // std::cout << fmt::format("Using encoder:[{}] error!\n", encoder); + videoCodec = avcodec_find_encoder(AV_CODEC_ID_H264); + + if (!videoCodec) + { + std::cout << "avcodec_alloc_context3 error"; + return false; + } + + std::cout << "Using default H264 encoder!\n"; + } + + mVideoCodecCtx = avcodec_alloc_context3(videoCodec); + if (!mVideoCodecCtx) + { + std::cout << "avcodec_alloc_context3 error"; + return false; + } + + // 压缩视频bit位大小 300kB + int bit_rate = bitrate * 1024 * 1024 * 8; + + // CBR:Constant BitRate - 固定比特率 + mVideoCodecCtx->flags |= AV_CODEC_FLAG_QSCALE; + mVideoCodecCtx->bit_rate = bit_rate; + mVideoCodecCtx->rc_min_rate = bit_rate; + mVideoCodecCtx->rc_max_rate = bit_rate; + mVideoCodecCtx->bit_rate_tolerance = bit_rate; + + mVideoCodecCtx->codec_id = videoCodec->id; + // 不支持AV_PIX_FMT_BGR24直接进行编码 + mVideoCodecCtx->pix_fmt = AV_PIX_FMT_VAAPI; + mVideoCodecCtx->codec_type = AVMEDIA_TYPE_VIDEO; + mVideoCodecCtx->width = width; + mVideoCodecCtx->height = height; + mVideoCodecCtx->time_base = {1, fps}; + mVideoCodecCtx->framerate = {fps, 1}; + mVideoCodecCtx->gop_size = 12; + mVideoCodecCtx->max_b_frames = 0; + mVideoCodecCtx->thread_count = 1; + // mVideoCodecCtx->sample_aspect_ratio = (AVRational){1, 1}; + + AVDictionary *video_codec_options = NULL; + av_dict_set(&video_codec_options, "profile", "main", 0); + // av_dict_set(&video_codec_options, "preset", "superfast", 0); + 
av_dict_set(&video_codec_options, "tune", "fastdecode", 0); + if ((err = set_hwframe_ctx(mVideoCodecCtx, hw_device_ctx, width, height)) < 0) + { + std::cout << "set_hwframe_ctx error\n"; + return false; + } + + if (avcodec_open2(mVideoCodecCtx, videoCodec, &video_codec_options) < 0) + { + std::cout << "avcodec_open2 error\n"; + return false; + } + + mVideoStream = avformat_new_stream(mFmtCtx, videoCodec); + if (!mVideoStream) + { + std::cout << "avformat_new_stream error\n"; + return false; + } + mVideoStream->id = mFmtCtx->nb_streams - 1; + // stream的time_base参数非常重要,它表示将现实中的一秒钟分为多少个时间基, 在下面调用avformat_write_header时自动完成 + avcodec_parameters_from_context(mVideoStream->codecpar, mVideoCodecCtx); + mVideoIndex = mVideoStream->id; + + // open output url + av_dump_format(mFmtCtx, 0, name.c_str(), 1); + if (!(mFmtCtx->oformat->flags & AVFMT_NOFILE)) + { + int ret = avio_open(&mFmtCtx->pb, name.c_str(), AVIO_FLAG_WRITE); + if (ret < 0) + { + // std::cout << fmt::format("avio_open error url: {}\n", name.c_str()); + // std::cout << fmt::format("ret = {} : {}\n", ret, av_err2str(ret)); + // std::cout << fmt::format("ret = {}\n", ret); + return false; + } + } + + AVDictionary *fmt_options = NULL; + av_dict_set(&fmt_options, "bufsize", "1024", 0); + + mFmtCtx->video_codec_id = mFmtCtx->oformat->video_codec; + mFmtCtx->audio_codec_id = mFmtCtx->oformat->audio_codec; + + // 调用该函数会将所有stream的time_base,自动设置一个值,通常是1/90000或1/1000,这表示一秒钟表示的时间基长度 + if (avformat_write_header(mFmtCtx, &fmt_options) < 0) + { + std::cout << "avformat_write_header error\n"; + return false; + } + + return true; +} + +void BsVideoSaver::encodeVideoAndSaveThread(void *arg) +{ + // PushExecutor *executor = (PushExecutor *)arg; + BsVideoSaver *mBsVideoSaver = (BsVideoSaver *)arg; + int width = mBsVideoSaver->width; + int height = mBsVideoSaver->height; + + // 未编码的视频帧(bgr格式) + VideoFrame *videoFrame = NULL; + // 未编码视频帧队列当前长度 + int videoFrameQSize = 0; + + AVFrame *hw_frame = NULL; + AVFrame *frame_nv12 = 
av_frame_alloc(); + frame_nv12->format = AV_PIX_FMT_NV12; + frame_nv12->width = width; + frame_nv12->height = height; + + int frame_nv12_buff_size = av_image_get_buffer_size(AV_PIX_FMT_NV12, width, height, 1); + uint8_t *frame_nv12_buff = (uint8_t *)av_malloc(frame_nv12_buff_size); + av_image_fill_arrays( + frame_nv12->data, frame_nv12->linesize, + frame_nv12_buff, + AV_PIX_FMT_NV12, + width, height, 1); + + /* read data into software frame, and transfer them into hw frame */ + // sw_frame->width = width; + // sw_frame->height = height; + // sw_frame->format = AV_PIX_FMT_NV12; + + if (!(hw_frame = av_frame_alloc())) + { + std::cout << "Error while av_frame_alloc().\n"; + } + if (av_hwframe_get_buffer(mBsVideoSaver->mVideoCodecCtx->hw_frames_ctx, hw_frame, 0) < 0) + { + std::cout << "Error while av_hwframe_get_buffer.\n"; + } + if (!hw_frame->hw_frames_ctx) + { + std::cout << "Error while hw_frames_ctx.\n"; + } + + // 编码后的视频帧 + AVPacket *pkt = av_packet_alloc(); + int64_t encodeSuccessCount = 0; + int64_t frameCount = 0; + + int64_t t1 = 0; + int64_t t2 = 0; + int ret = -1; + + while (mBsVideoSaver->push_running) + { + if (mBsVideoSaver->getVideoFrame(videoFrame, videoFrameQSize)) + { + + // frame_bgr 转 frame_nv12 + //mBsVideoSaver->bgr24ToYuv420p(videoFrame->data, width, height, frame_nv12_buff); + mBsVideoSaver->Rgb2NV12(videoFrame->data, 3, width, height, frame_nv12_buff); + // RGB2YUV_NV12(videoFrame->data, frame_nv12_buff, width, height ); + + frame_nv12->pts = frame_nv12->pkt_dts = av_rescale_q_rnd( + frameCount, + mBsVideoSaver->mVideoCodecCtx->time_base, + mBsVideoSaver->mVideoStream->time_base, + (AVRounding)(AV_ROUND_NEAR_INF | AV_ROUND_PASS_MINMAX)); + + frame_nv12->pkt_duration = av_rescale_q_rnd( + 1, + mBsVideoSaver->mVideoCodecCtx->time_base, + mBsVideoSaver->mVideoStream->time_base, + (AVRounding)(AV_ROUND_NEAR_INF | AV_ROUND_PASS_MINMAX)); + + frame_nv12->pkt_pos = frameCount; + + hw_frame->pts = hw_frame->pkt_dts = av_rescale_q_rnd( + frameCount, + 
mBsVideoSaver->mVideoCodecCtx->time_base, + mBsVideoSaver->mVideoStream->time_base, + (AVRounding)(AV_ROUND_NEAR_INF | AV_ROUND_PASS_MINMAX)); + + hw_frame->pkt_duration = av_rescale_q_rnd( + 1, + mBsVideoSaver->mVideoCodecCtx->time_base, + mBsVideoSaver->mVideoStream->time_base, + (AVRounding)(AV_ROUND_NEAR_INF | AV_ROUND_PASS_MINMAX)); + + hw_frame->pkt_pos = frameCount; + + if (av_hwframe_transfer_data(hw_frame, frame_nv12, 0) < 0) + { + std::cout << "Error while transferring frame data to surface.\n"; + } + + t1 = getCurTime(); + ret = avcodec_send_frame(mBsVideoSaver->mVideoCodecCtx, hw_frame); + if (ret >= 0) + { + ret = avcodec_receive_packet(mBsVideoSaver->mVideoCodecCtx, pkt); + if (ret >= 0) + { + t2 = getCurTime(); + encodeSuccessCount++; + + pkt->stream_index = mBsVideoSaver->mVideoIndex; + + pkt->pos = frameCount; + pkt->duration = frame_nv12->pkt_duration; + + ret = mBsVideoSaver->writePkt(pkt); + + if (ret < 0) + { + // std::cout << fmt::format("writePkt : ret = {}\n", ret); + } + } + else + { + // std::cout << fmt::format("avcodec_receive_packet error : ret = {}\n", ret); + } + } + else + { + // std::cout << fmt::format("avcodec_send_frame error : ret = {}\n", ret); + } + + frameCount++; + + // 释放资源 + delete videoFrame; + videoFrame = NULL; + } + else + { + std::this_thread::sleep_for(std::chrono::milliseconds(1)); + } + } + // std::cout << fmt::format("push_running is false!\n"); + // std::cout << fmt::format("end stream!\n"); + + // 写文件尾 + av_write_trailer(mBsVideoSaver->mFmtCtx); + + av_packet_unref(pkt); + pkt = NULL; + + av_free(frame_nv12_buff); + frame_nv12_buff = NULL; + + av_frame_free(&frame_nv12); + av_frame_free(&hw_frame); + // av_frame_unref(frame_nv12); + frame_nv12 = NULL; +} + +int BsVideoSaver::writePkt(AVPacket *pkt) +{ + mWritePkt_mtx.lock(); + int ret = av_write_frame(mFmtCtx, pkt); + mWritePkt_mtx.unlock(); + + return ret; +} + +bool BsVideoSaver::getVideoFrame(VideoFrame *&frame, int &frameQSize) +{ + + 
mRGB_VideoFrameQ_mtx.lock(); + + if (!mRGB_VideoFrameQ.empty()) + { + frame = mRGB_VideoFrameQ.front(); + mRGB_VideoFrameQ.pop(); + frameQSize = mRGB_VideoFrameQ.size(); + mRGB_VideoFrameQ_mtx.unlock(); + return true; + } + else + { + frameQSize = 0; + mRGB_VideoFrameQ_mtx.unlock(); + return false; + } +} + +void BsVideoSaver::write(cv::Mat &image) +{ + + int size = image.cols * image.rows * image.channels(); + VideoFrame *frame = new VideoFrame(VideoFrame::BGR, image.cols, image.rows); + cv::Mat bgr = cv::Mat::zeros(image.size(), CV_8UC3); + cv::cvtColor(image, bgr, cv::COLOR_BGR2RGB); + + memcpy(frame->data, bgr.data, size); + + mRGB_VideoFrameQ_mtx.lock(); + mRGB_VideoFrameQ.push(frame); + mRGB_VideoFrameQ_mtx.unlock(); +} + +bool BsVideoSaver::videoFrameQisEmpty() +{ + return mRGB_VideoFrameQ.empty(); +} + +unsigned char BsVideoSaver::clipValue(unsigned char x, unsigned char min_val, unsigned char max_val) +{ + if (x > max_val) + { + return max_val; + } + else if (x < min_val) + { + return min_val; + } + else + { + return x; + } +} + +bool BsVideoSaver::bgr24ToYuv420p(unsigned char *bgrBuf, int w, int h, unsigned char *yuvBuf) +{ + + unsigned char *ptrY, *ptrU, *ptrV, *ptrRGB; + memset(yuvBuf, 0, w * h * 3 / 2); + ptrY = yuvBuf; + ptrU = yuvBuf + w * h; + ptrV = ptrU + (w * h * 1 / 4); + unsigned char y, u, v, r, g, b; + + for (int j = 0; j < h; ++j) + { + + ptrRGB = bgrBuf + w * j * 3; + for (int i = 0; i < w; i++) + { + b = *(ptrRGB++); + g = *(ptrRGB++); + r = *(ptrRGB++); + + y = (unsigned char)((66 * r + 129 * g + 25 * b + 128) >> 8) + 16; + u = (unsigned char)((-38 * r - 74 * g + 112 * b + 128) >> 8) + 128; + v = (unsigned char)((112 * r - 94 * g - 18 * b + 128) >> 8) + 128; + *(ptrY++) = clipValue(y, 0, 255); + if (j % 2 == 0 && i % 2 == 0) + { + *(ptrU++) = clipValue(u, 0, 255); + } + else + { + if (i % 2 == 0) + { + *(ptrV++) = clipValue(v, 0, 255); + } + } + } + } + return true; +} + + + +//https://software.intel.com/en-us/node/503873 +//YCbCr Color 
Model: +// The YCbCr color space is used for component digital video and was developed as part of the ITU-R BT.601 Recommendation. YCbCr is a scaled and offset version of the YUV color space. +// The Intel IPP functions use the following basic equations [Jack01] to convert between R'G'B' in the range 0-255 and Y'Cb'Cr' (this notation means that all components are derived from gamma-corrected R'G'B'): +// Y' = 0.257*R' + 0.504*G' + 0.098*B' + 16 +// Cb' = -0.148*R' - 0.291*G' + 0.439*B' + 128 +// Cr' = 0.439*R' - 0.368*G' - 0.071*B' + 128 + + +//Y' = 0.257*R' + 0.504*G' + 0.098*B' + 16 +static float Rgb2Y(float r0, float g0, float b0) +{ + float y0 = 0.257f*r0 + 0.504f*g0 + 0.098f*b0 + 16.0f; + return y0; +} + +//U equals Cb' +//Cb' = -0.148*R' - 0.291*G' + 0.439*B' + 128 +static float Rgb2U(float r0, float g0, float b0) +{ + float u0 = -0.148f*r0 - 0.291f*g0 + 0.439f*b0 + 128.0f; + return u0; +} + +//V equals Cr' +//Cr' = 0.439*R' - 0.368*G' - 0.071*B' + 128 +static float Rgb2V(float r0, float g0, float b0) +{ + float v0 = 0.439f*r0 - 0.368f*g0 - 0.071f*b0 + 128.0f; + return v0; +} + +//Convert two rows from RGB to two Y rows, and one row of interleaved U,V. +//I0 and I1 points two sequential source rows. +//I0 -> rgbrgbrgbrgbrgbrgb... +//I1 -> rgbrgbrgbrgbrgbrgb... +//Y0 and Y1 points two sequential destination rows of Y plane. +//Y0 -> yyyyyy +//Y1 -> yyyyyy +//UV0 points destination rows of interleaved UV plane. +//UV0 -> uvuvuv +static void Rgb2NV12TwoRows(const unsigned char I0[], + const unsigned char I1[], + int step, + const int image_width, + unsigned char Y0[], + unsigned char Y1[], + unsigned char UV0[]) +{ + int x; //Column index + + //Process 4 source pixels per iteration (2 pixels of row I0 and 2 pixels of row I1). + for (x = 0; x < image_width; x += 2) + { + //Load R,G,B elements from first row (and convert to float). 
+ float r00 = (float)I0[x*step + 0]; + float g00 = (float)I0[x*step + 1]; + float b00 = (float)I0[x*step + 2]; + + //Load next R,G,B elements from first row (and convert to float). + float r01 = (float)I0[x*step + step+0]; + float g01 = (float)I0[x*step + step+1]; + float b01 = (float)I0[x*step + step+2]; + + //Load R,G,B elements from second row (and convert to float). + float r10 = (float)I1[x*step + 0]; + float g10 = (float)I1[x*step + 1]; + float b10 = (float)I1[x*step + 2]; + + //Load next R,G,B elements from second row (and convert to float). + float r11 = (float)I1[x*step + step+0]; + float g11 = (float)I1[x*step + step+1]; + float b11 = (float)I1[x*step + step+2]; + + //Calculate 4 Y elements. + float y00 = Rgb2Y(r00, g00, b00); + float y01 = Rgb2Y(r01, g01, b01); + float y10 = Rgb2Y(r10, g10, b10); + float y11 = Rgb2Y(r11, g11, b11); + + //Calculate 4 U elements. + float u00 = Rgb2U(r00, g00, b00); + float u01 = Rgb2U(r01, g01, b01); + float u10 = Rgb2U(r10, g10, b10); + float u11 = Rgb2U(r11, g11, b11); + + //Calculate 4 V elements. + float v00 = Rgb2V(r00, g00, b00); + float v01 = Rgb2V(r01, g01, b01); + float v10 = Rgb2V(r10, g10, b10); + float v11 = Rgb2V(r11, g11, b11); + + //Calculate destination U element: average of 2x2 "original" U elements. + float u0 = (u00 + u01 + u10 + u11)*0.25f; + + //Calculate destination V element: average of 2x2 "original" V elements. + float v0 = (v00 + v01 + v10 + v11)*0.25f; + + //Store 4 Y elements (two in first row and two in second row). + Y0[x + 0] = (unsigned char)(y00 + 0.5f); + Y0[x + 1] = (unsigned char)(y01 + 0.5f); + Y1[x + 0] = (unsigned char)(y10 + 0.5f); + Y1[x + 1] = (unsigned char)(y11 + 0.5f); + + //Store destination U element. + UV0[x + 0] = (unsigned char)(u0 + 0.5f); + + //Store destination V element (next to stored U element). + UV0[x + 1] = (unsigned char)(v0 + 0.5f); + } +} + + +//Convert image I from pixel ordered RGB to NV12 format. 
+//I - Input image in pixel ordered RGB format +//image_width - Number of columns of I +//image_height - Number of rows of I +//J - Destination "image" in NV12 format. + +//I is pixel ordered RGB color format (size in bytes is image_width*image_height*3): +//RGBRGBRGBRGBRGBRGB +//RGBRGBRGBRGBRGBRGB +//RGBRGBRGBRGBRGBRGB +//RGBRGBRGBRGBRGBRGB +// +//J is in NV12 format (size in bytes is image_width*image_height*3/2): +//YYYYYY +//YYYYYY +//UVUVUV +//Each element of destination U is average of 2x2 "original" U elements +//Each element of destination V is average of 2x2 "original" V elements +// +//Limitations: +//1. image_width must be a multiple of 2. +//2. image_height must be a multiple of 2. +//3. I and J must be two separate arrays (in place computation is not supported). +void BsVideoSaver::Rgb2NV12(const unsigned char I[], int step, + const int image_width, + const int image_height, + unsigned char J[]) +{ + //In NV12 format, UV plane starts below Y plane. + unsigned char *UV = &J[image_width*image_height]; + + //I0 and I1 points two sequential source rows. + const unsigned char *I0; //I0 -> rgbrgbrgbrgbrgbrgb... + const unsigned char *I1; //I1 -> rgbrgbrgbrgbrgbrgb... + + //Y0 and Y1 points two sequential destination rows of Y plane. + unsigned char *Y0; //Y0 -> yyyyyy + unsigned char *Y1; //Y1 -> yyyyyy + + //UV0 points destination rows of interleaved UV plane. + unsigned char *UV0; //UV0 -> uvuvuv + + int y; //Row index + + //In each iteration: process two rows of Y plane, and one row of interleaved UV plane. + for (y = 0; y < image_height; y += 2) + { + I0 = &I[y*image_width*step]; //Input row width is image_width*3 bytes (each pixel is R,G,B). + I1 = &I[(y+1)*image_width*step]; + + Y0 = &J[y*image_width]; //Output Y row width is image_width bytes (one Y element per pixel). + Y1 = &J[(y+1)*image_width]; + + UV0 = &UV[(y/2)*image_width]; //Output UV row - width is same as Y row width. 
+ + //Process two source rows into: Two Y destination row, and one destination interleaved U,V row. + Rgb2NV12TwoRows(I0, + I1, + step, + image_width, + Y0, + Y1, + UV0); + } +} + + + diff --git a/spirecv/saver/SaverCppNodes/src/ffmpeg/x86_intel/bs_video_saver.h b/spirecv/saver/SaverCppNodes/src/ffmpeg/x86_intel/bs_video_saver.h new file mode 100644 index 0000000..c56b662 --- /dev/null +++ b/spirecv/saver/SaverCppNodes/src/ffmpeg/x86_intel/bs_video_saver.h @@ -0,0 +1,101 @@ +#pragma once + +#include +#include +#include + + +#include +// #include + +#include +#include +extern "C" +{ +#include +#include +#include +#include +// #include +#include + +#include +#include +} + +#include +#include +#include "bs_common.h" + +using namespace cv; +class BsVideoSaver +{ +public: + BsVideoSaver(); + ~BsVideoSaver(); + + // 用于初始化视频推流,仅调用一次 + bool setup(std::string name, int width, int height, int fps, std::string encoder, int bitrate); + // 推流一帧图像,在循环中被调用 + void write(cv::Mat& image); + + + // 连接流媒体服务器 + bool init(std::string name, int width, int height, int fps, std::string encoder, int bitrate); + void start(); + void stop(); + + // 编码视频帧并推流 + static void encodeVideoAndSaveThread(void* arg); + + bool videoFrameQisEmpty(); + + int writePkt(AVPacket *pkt); + + + // 上下文 + AVFormatContext *mFmtCtx = nullptr; + // 视频帧 + AVCodecContext *mVideoCodecCtx = NULL; + AVStream *mVideoStream = NULL; + + AVBufferRef *hw_device_ctx = NULL; + + int err; + + + int mVideoIndex = -1; + + +private: + + // 从mRGB_VideoFrameQ里面获取RGBframe + bool getVideoFrame(VideoFrame *&frame, int &frameQSize); + + + // bgr24转yuv420p + unsigned char clipValue(unsigned char x, unsigned char min_val, unsigned char max_val); + bool bgr24ToYuv420p(unsigned char *bgrBuf, int w, int h, unsigned char *yuvBuf); + void Rgb2NV12(const unsigned char I[], int step, const int image_width, const int image_height, unsigned char J[]); + // void RGB2YUV_NV12(uint8_t* rgbBufIn, uint8_t* yuvBufOut, int nWidth, int nHeight); + + int 
width = -1; + int height = -1; + + + + + bool push_running = false; + bool nd_push_frame = false; + + // 视频帧 + std::queue mRGB_VideoFrameQ; + std::mutex mRGB_VideoFrameQ_mtx; + + + // 推流锁 + std::mutex mWritePkt_mtx; + std::thread* mThread = nullptr; + + +}; diff --git a/spirecv/saver/SaverCppNodes/src/gstreamer/writer_gstreamer_impl.cpp b/spirecv/saver/SaverCppNodes/src/gstreamer/writer_gstreamer_impl.cpp new file mode 100644 index 0000000..b67706a --- /dev/null +++ b/spirecv/saver/SaverCppNodes/src/gstreamer/writer_gstreamer_impl.cpp @@ -0,0 +1,65 @@ +#include "writer_gstreamer_impl.h" +#include +#include + + + +namespace sv { + + +VideoWriterGstreamerImpl::VideoWriterGstreamerImpl() +{ +} +VideoWriterGstreamerImpl::~VideoWriterGstreamerImpl() +{ +} + +bool VideoWriterGstreamerImpl::gstreamerSetup(VideoWriterBase* base_, std::string file_name_) +{ + this->_file_path = base_->getFilePath(); + this->_fps = base_->getFps(); + this->_image_size = base_->getSize(); + +#ifdef WITH_GSTREAMER + bool opend = false; +#ifdef PLATFORM_JETSON + std::string pipeline = "appsrc ! videoconvert ! nvvidconv ! video/x-raw(memory:NVMM) ! nvv4l2h264enc ! h264parse ! matroskamux ! filesink location=" + this->_file_path + file_name_ + ".avi"; + opend = this->_writer.open(pipeline, cv::VideoWriter::fourcc('m','p','4','v'), this->_fps, this->_image_size); +#endif +#ifdef PLATFORM_X86_INTEL + std::string pipeline = "appsrc ! videoconvert ! vaapipostproc ! vaapih264enc ! h264parse ! matroskamux ! 
filesink location=" + this->_file_path + file_name_ + ".avi"; + opend = this->_writer.open(pipeline, cv::VideoWriter::fourcc('m','p','4','v'), this->_fps, this->_image_size); +#else + opend = this->_writer.open(this->_file_path + file_name_ + ".avi", cv::VideoWriter::fourcc('x','v','i','d'), this->_fps, this->_image_size); +#endif + return opend; +#endif + return false; +} + +bool VideoWriterGstreamerImpl::gstreamerIsOpened() +{ +#ifdef WITH_GSTREAMER + return this->_writer.isOpened(); +#endif + return false; +} + +void VideoWriterGstreamerImpl::gstreamerWrite(cv::Mat img_) +{ +#ifdef WITH_GSTREAMER + this->_writer << img_; +#endif +} + +void VideoWriterGstreamerImpl::gstreamerRelease() +{ +#ifdef WITH_GSTREAMER + if (this->_writer.isOpened()) + this->_writer.release(); +#endif +} + + +} + diff --git a/spirecv/saver/SaverCppNodes/src/gstreamer/writer_gstreamer_impl.h b/spirecv/saver/SaverCppNodes/src/gstreamer/writer_gstreamer_impl.h new file mode 100644 index 0000000..2e17a8f --- /dev/null +++ b/spirecv/saver/SaverCppNodes/src/gstreamer/writer_gstreamer_impl.h @@ -0,0 +1,34 @@ +#ifndef __SV2_WRITER_GSTREAMER_IMPL__ +#define __SV2_WRITER_GSTREAMER_IMPL__ + +#include +#include +#include + + +namespace sv { + + +class VideoWriterGstreamerImpl +{ +public: + VideoWriterGstreamerImpl(); + ~VideoWriterGstreamerImpl(); + + bool gstreamerSetup(VideoWriterBase* base_, std::string file_name_); + bool gstreamerIsOpened(); + void gstreamerWrite(cv::Mat img_); + void gstreamerRelease(); + + std::string _file_path; + double _fps; + cv::Size _image_size; + +#ifdef WITH_GSTREAMER + cv::VideoWriter _writer; +#endif +}; + + +} +#endif diff --git a/spirecv/saver/SaverCppNodes/src/sv2_saver.cpp b/spirecv/saver/SaverCppNodes/src/sv2_saver.cpp new file mode 100644 index 0000000..9439c33 --- /dev/null +++ b/spirecv/saver/SaverCppNodes/src/sv2_saver.cpp @@ -0,0 +1,36 @@ +#include +#include +#include "sv2_saver.h" +#include +#include +#include +#include + + +using namespace std; + + 
+namespace sv2 +{ + + void SaverCppNode::run() + { + while (this->is_running()) + { + nlohmann::json image_json; + nlohmann::json tgts_json; + + // 主线程阻塞,等待队里中出现数据 + std::unique_lock lock(this->_full_queue_mtx); + this->_full_cv.wait(lock, [this] + { return !this->_full_queue.empty(); }); + + image_json = this->_full_queue.front(); + this->_full_queue.pop(); + cv::Mat image = sms::sms2cvimg(image_json); + tgts_json = image_json["spirecv_msgs::2DTargets"]; + this->_vw.write(image, tgts_json); + + } + } +} diff --git a/spirecv/saver/SaverCppNodes/src/sv2_video_base.cpp b/spirecv/saver/SaverCppNodes/src/sv2_video_base.cpp new file mode 100644 index 0000000..dbe2f3b --- /dev/null +++ b/spirecv/saver/SaverCppNodes/src/sv2_video_base.cpp @@ -0,0 +1,256 @@ +#include "sv2_video_base.h" +#include + +#define SV2_MAX_FRAMES 52000 +typedef unsigned char byte; + + +namespace sv2 { + + +cv::Ptr _g_dict = nullptr; + + +cv::Mat& _attach_aruco(int id, cv::Mat& img) +{ + cv::Mat marker_img; + std::vector ch(3); + cv::aruco::Dictionary dict = cv::aruco::getPredefinedDictionary(cv::aruco::DICT_5X5_1000); + ch[0] = cv::Mat::zeros(22, 22, CV_8UC1); + ch[1] = cv::Mat::zeros(22, 22, CV_8UC1); + ch[2] = cv::Mat::zeros(22, 22, CV_8UC1); + + ch[0].setTo(cv::Scalar(255)); + ch[1].setTo(cv::Scalar(255)); + ch[2].setTo(cv::Scalar(255)); + cv::Rect inner_roi = cv::Rect(4, 4, 14, 14); + cv::Rect full_roi = cv::Rect(img.cols - 22, img.rows - 22, 22, 22); + + int id_k = id % 1000; + + // dict.drawMarker(id_k, 14, marker_img, 1); + cv::aruco::generateImageMarker(dict, id_k, 14, marker_img, 1); + marker_img.copyTo(ch[0](inner_roi)); + // dict.drawMarker(id_k, 14, marker_img, 1); + cv::aruco::generateImageMarker(dict, id_k, 14, marker_img, 1); + marker_img.copyTo(ch[1](inner_roi)); + // dict.drawMarker(id_k, 14, marker_img, 1); + cv::aruco::generateImageMarker(dict, id_k, 14, marker_img, 1); + marker_img.copyTo(ch[2](inner_roi)); + + cv::merge(ch, marker_img); + marker_img.copyTo(img(full_roi)); + 
return img; +} + +int _parse_aruco(cv::Mat& img) +{ + int id; + cv::Mat marker_img; + std::vector ch(3); + if (_g_dict == nullptr) + { + _g_dict = new cv::aruco::Dictionary; + *_g_dict = cv::aruco::getPredefinedDictionary(cv::aruco::DICT_5X5_1000); + } + cv::Rect full_roi = cv::Rect(img.cols - 22, img.rows - 22, 22, 22); + img(full_roi).copyTo(marker_img); + cv::split(marker_img, ch); + + std::vector id_i; + std::vector id_k; + std::vector id_m; + std::vector > marker_corners; + cv::aruco::detectMarkers(ch[0], _g_dict, marker_corners, id_i); + cv::aruco::detectMarkers(ch[1], _g_dict, marker_corners, id_k); + cv::aruco::detectMarkers(ch[2], _g_dict, marker_corners, id_m); + if (id_i.size() > 0 || id_k.size() > 0 || id_m.size() > 0) + { + if (id_i.size() > 0) + id = id_i[0]; + else if (id_k.size() > 0) + id = id_k[0]; + else if (id_m.size() > 0) + id = id_m[0]; + } + else + { + // std::cout << "error ch0 & ch1" << std::endl; + id = -1; + } + + return id; +} + + + +VideoWriterBase::VideoWriterBase() +{ + this->_is_running = false; + this->_fid = 0; + this->_fcnt = 0; +} +VideoWriterBase::~VideoWriterBase() +{ + this->release(); + // this->_tt.join(); +} +cv::Size VideoWriterBase::getSize() +{ + return this->_image_size; +} +double VideoWriterBase::getFps() +{ + return this->_fps; +} +std::string VideoWriterBase::getFilePath() +{ + return this->_file_path; +} +bool VideoWriterBase::isRunning() +{ + return this->_is_running; +} +void VideoWriterBase::setup(std::string file_path, cv::Size size, double fps, bool with_targets) +{ + this->_file_path = file_path; + this->_fps = fps; + this->_image_size = size; + this->_with_targets = with_targets; + + this->_init(); + + this->_tt = std::thread(&VideoWriterBase::_run, this); + this->_tt.detach(); +} +void VideoWriterBase::write(cv::Mat image, nlohmann::json tgts_json) +{ + if (this->_is_running) + { + cv::Mat image_put; + if (this->_image_size.height == image.rows && this->_image_size.width == image.cols) + { + 
image.copyTo(image_put); + } + else + { + char msg[256]; + sprintf(msg, "SpireCV 2.0 (106) Input image SIZE (%d, %d) != Saving SIZE (%d, %d)!", image.cols, image.rows, this->_image_size.width, this->_image_size.height); + throw std::runtime_error(msg); + // cv::resize(image, image_put, this->_image_size); + } + + if (this->_targets_ofs) + { + this->_fid ++; + image_put = _attach_aruco(this->_fid, image_put); + tgts_json["img_id"] = this->_fid; + this->_tgts_to_write.push(tgts_json); + if (this->_fid >= SV2_MAX_FRAMES) + this->_fid = 0; + } + this->_image_to_write.push(image_put); + } +} +void VideoWriterBase::_run() +{ + while (this->_is_running && isOpenedImpl()) + { + while (!this->_image_to_write.empty()) + { + this->_fcnt ++; + + cv::Mat img = _image_to_write.front(); + if (this->_targets_ofs) + { + if (!this->_tgts_to_write.empty()) + { + //nlohmann::json tgts = this->_tgts_to_write.front(); + + std::string json_str = this->_tgts_to_write.front(); + _targets_ofs << json_str << std::endl; + + this->_tgts_to_write.pop(); + } + } + // this->_writer << img; + writeImpl(img); + this->_image_to_write.pop(); + + if (this->_fcnt >= SV2_MAX_FRAMES) + { + _init(); + } + } + std::this_thread::sleep_for(std::chrono::milliseconds(int(1000 / this->_fps))); + } +} + +void VideoWriterBase::_init() +{ + this->release(); + + // get now time + time_t t = time(NULL);; + tm* local = localtime(&t); + + char s_buf[128]; + strftime(s_buf, 64, "/FlyVideo_%Y-%m-%d_%H-%M-%S", local); + std::string name = std::string(s_buf); + + bool opend = false; + opend = setupImpl(name); + + if (!opend) + { + std::cout << "Failed to write video: " << _file_path + name << std::endl; + } + else + { + this->_is_running = true; + if (this->_with_targets) + { + this->_targets_ofs.open(this->_file_path + name + ".svj"); + if (!this->_targets_ofs) + { + std::cout << "Failed to write info file: " << this->_file_path << std::endl; + this->_is_running = false; + } + } + } +} +void VideoWriterBase::release() +{ 
+ this->_is_running = false; + this->_fid = 0; + this->_fcnt = 0; + + if (this->_targets_ofs.is_open()) + this->_targets_ofs.close(); + + while (!this->_image_to_write.empty()) + this->_image_to_write.pop(); + while (!this->_tgts_to_write.empty()) + this->_tgts_to_write.pop(); + + releaseImpl(); +} +bool VideoWriterBase::setupImpl(std::string file_name_) +{ + return false; +} +bool VideoWriterBase::isOpenedImpl() +{ + return false; +} +void VideoWriterBase::writeImpl(cv::Mat img_) +{ + +} +void VideoWriterBase::releaseImpl() +{ + +} + +} + diff --git a/spirecv/saver/SaverCppNodes/src/sv2_video_saver.cpp b/spirecv/saver/SaverCppNodes/src/sv2_video_saver.cpp new file mode 100644 index 0000000..e8a0b1a --- /dev/null +++ b/spirecv/saver/SaverCppNodes/src/sv2_video_saver.cpp @@ -0,0 +1,80 @@ +#include "sv2_video_saver.h" +#include +#include +#ifdef WITH_GSTREAMER +#include "writer_gstreamer_impl.h" +#endif +#ifdef WITH_FFMPEG +#include "bs_video_saver.h" +#endif + + +namespace sv2 { + + +VideoWriter::VideoWriter() +{ +#ifdef WITH_GSTREAMER + this->_gstreamer_impl = new VideoWriterGstreamerImpl; +#endif +#ifdef WITH_FFMPEG + this->_ffmpeg_impl = new BsVideoSaver; +#endif +} +VideoWriter::~VideoWriter() +{ +} + +bool VideoWriter::setupImpl(std::string file_name_) +{ + cv::Size img_sz = this->getSize(); + double fps = this->getFps(); + std::string file_path = this->getFilePath(); + +#ifdef WITH_GSTREAMER + return this->_gstreamer_impl->gstreamerSetup(this, file_name_); +#endif +#ifdef WITH_FFMPEG +#if defined(PLATFORM_X86_CUDA) + std::string enc = "h264_nvenc"; +#elif defined(PLATFORM_X86_INTEL) + std::string enc = "h264_vaapi"; +#endif + return this->_ffmpeg_impl->setup(file_path + file_name_ + ".avi", img_sz.width, img_sz.height, (int)fps, enc, 4); +#endif + return false; +} + +bool VideoWriter::isOpenedImpl() +{ +#ifdef WITH_GSTREAMER + return this->_gstreamer_impl->gstreamerIsOpened(); +#endif +#ifdef WITH_FFMPEG + return this->isRunning(); +#endif + return false; +} + 
+void VideoWriter::writeImpl(cv::Mat img_) +{ +#ifdef WITH_GSTREAMER + this->_gstreamer_impl->gstreamerWrite(img_); +#endif +#ifdef WITH_FFMPEG + this->_ffmpeg_impl->write(img_); +#endif +} + +void VideoWriter::releaseImpl() +{ +#ifdef WITH_GSTREAMER + this->_gstreamer_impl->gstreamerRelease(); +#endif +#ifdef WITH_FFMPEG + this->_ffmpeg_impl->stop(); +#endif +} + +} + -- Gitee From ce2866935291225e4d95cfb36bb7354f0aa84060 Mon Sep 17 00:00:00 2001 From: Daniel <1367240116@qq.com> Date: Thu, 19 Dec 2024 15:54:12 +0800 Subject: [PATCH 2/4] fix one argc for SaverCppNode. --- spirecv/saver/SaverCppNodes/SV2SaverDemo.cpp | 8 +++++--- spirecv/saver/SaverCppNodes/include/sv2_saver.h | 3 ++- 2 files changed, 7 insertions(+), 4 deletions(-) diff --git a/spirecv/saver/SaverCppNodes/SV2SaverDemo.cpp b/spirecv/saver/SaverCppNodes/SV2SaverDemo.cpp index 22ecd89..3b6321c 100644 --- a/spirecv/saver/SaverCppNodes/SV2SaverDemo.cpp +++ b/spirecv/saver/SaverCppNodes/SV2SaverDemo.cpp @@ -11,19 +11,21 @@ using namespace std; int main(int argc, char *argv[]) { std::string job_name = "live"; - std::string config = ""; + std::string config = ""; + std::string specified_input_topic = ""; if (argc < 2) { std::cout << "Please input SpireCV Config." 
<< std::endl; } config = argv[1]; - if (argc > 2) + if (argc > 3) { job_name = argv[2]; + specified_input_topic = argv[3]; } - sv2::SaverCppNode node(job_name, config); + sv2::SaverCppNode node(job_name, config, specified_input_topic); node.start(); node.join(); return 0; diff --git a/spirecv/saver/SaverCppNodes/include/sv2_saver.h b/spirecv/saver/SaverCppNodes/include/sv2_saver.h index 04d8d58..6bf7f61 100644 --- a/spirecv/saver/SaverCppNodes/include/sv2_saver.h +++ b/spirecv/saver/SaverCppNodes/include/sv2_saver.h @@ -21,9 +21,10 @@ namespace sv2 SaverCppNode( std::string job_name, std::string param_file, + std::string specified_input_topic, std::string ip = "127.0.0.1", int port = 9094) : sms::BaseNode("SaverCppNode", job_name, param_file, ip, port), - _det_res_sub("/" + job_name + "/detector/image_results", "sensor_msgs::CompressedImage", std::bind(&SaverCppNode::full_res_callback, this, std::placeholders::_1)) + _det_res_sub("/" + job_name + "/" + specified_input_topic, "sensor_msgs::CompressedImage", std::bind(&SaverCppNode::full_res_callback, this, std::placeholders::_1)) { // 读取节点参数 this->_image_width = this->get_param("image_width", 640); -- Gitee From d4a99d2f9d757b4d9e5476104d530901336757b8 Mon Sep 17 00:00:00 2001 From: Zhang__Ling Date: Mon, 23 Dec 2024 18:47:45 +0800 Subject: [PATCH 3/4] Adjust the code format, and add an initial parameter value for AFOBNodes. 
--- .../AutoFocusDetCppNodes/sv2_autofocus_det.h | 2 +- spirecv/algorithm/mot/sv2_mot.h | 1 - spirecv/calib/CameraCalibrationNode.py | 4 -- .../CameraCppNodes/include/sv2_camera_base.h | 4 -- .../CameraCppNodes/include/sv2_camera_input.h | 5 -- .../CameraCppNodes/include/sv2_camera_read.h | 2 +- .../CameraCppNodes/src/sv2_camera_base.cpp | 6 +- .../CameraCppNodes/src/sv2_camera_read.cpp | 55 +++++++++---------- 8 files changed, 29 insertions(+), 50 deletions(-) diff --git a/spirecv/algorithm/autofocus_det/AutoFocusDetCppNodes/sv2_autofocus_det.h b/spirecv/algorithm/autofocus_det/AutoFocusDetCppNodes/sv2_autofocus_det.h index fc55de3..41878c5 100644 --- a/spirecv/algorithm/autofocus_det/AutoFocusDetCppNodes/sv2_autofocus_det.h +++ b/spirecv/algorithm/autofocus_det/AutoFocusDetCppNodes/sv2_autofocus_det.h @@ -94,7 +94,7 @@ private: bool _keep_unlocked; bool _use_square_region; - bool _locked; + bool _locked = false; int _lock_thres; int _unlock_thres; int _lock_count; diff --git a/spirecv/algorithm/mot/sv2_mot.h b/spirecv/algorithm/mot/sv2_mot.h index 5d0f3a1..3e5c85e 100644 --- a/spirecv/algorithm/mot/sv2_mot.h +++ b/spirecv/algorithm/mot/sv2_mot.h @@ -107,7 +107,6 @@ namespace sv2 std::condition_variable _full_cv; - }; } diff --git a/spirecv/calib/CameraCalibrationNode.py b/spirecv/calib/CameraCalibrationNode.py index b347a45..196294e 100644 --- a/spirecv/calib/CameraCalibrationNode.py +++ b/spirecv/calib/CameraCalibrationNode.py @@ -173,13 +173,9 @@ class CameraCalibrationNode(threading.Thread, BaseNode): self.logger = Logger(self.__class__.__name__) self.square_size = self.get_param('square_size', 10) -<<<<<<< HEAD - self.outputFile = self.get_param('output_file', 'calib.json') -======= self.output_file = self.get_param('output_file', 'calib.json') self.logger.info("square_size: " + str(self.square_size)) self.logger.info("output_file: " + self.output_file) ->>>>>>> 6cf75b18f3e5ab561e6ae46cc71c929301387b7d self.start() diff --git 
a/spirecv/dataloader/CameraCppNodes/include/sv2_camera_base.h b/spirecv/dataloader/CameraCppNodes/include/sv2_camera_base.h index 93fb52c..cb1b2ad 100644 --- a/spirecv/dataloader/CameraCppNodes/include/sv2_camera_base.h +++ b/spirecv/dataloader/CameraCppNodes/include/sv2_camera_base.h @@ -9,9 +9,6 @@ #include #include - - -#define SV_RAD2DEG 57.2957795 // #define X86_PLATFORM // #define JETSON_PLATFORM @@ -19,7 +16,6 @@ namespace sv2 { - enum class CameraType {NONE, WEBCAM, V4L2CAM, MIPI, RTSP, VIDEO, G1, Q10, GX40, SU17}; class CameraBase { diff --git a/spirecv/dataloader/CameraCppNodes/include/sv2_camera_input.h b/spirecv/dataloader/CameraCppNodes/include/sv2_camera_input.h index 9a6741e..f81fca7 100644 --- a/spirecv/dataloader/CameraCppNodes/include/sv2_camera_input.h +++ b/spirecv/dataloader/CameraCppNodes/include/sv2_camera_input.h @@ -1,7 +1,6 @@ #ifndef __SV2_CAMERA_INPUT__ #define __SV2_CAMERA_INPUT__ - #include #include #include @@ -9,10 +8,8 @@ #include #include "sv2_camera_base.h" - namespace sv2 { - class Camera : public CameraBase { public: @@ -22,7 +19,5 @@ protected: void openImpl(); }; - - } #endif diff --git a/spirecv/dataloader/CameraCppNodes/include/sv2_camera_read.h b/spirecv/dataloader/CameraCppNodes/include/sv2_camera_read.h index 3063867..ead5e7a 100644 --- a/spirecv/dataloader/CameraCppNodes/include/sv2_camera_read.h +++ b/spirecv/dataloader/CameraCppNodes/include/sv2_camera_read.h @@ -70,7 +70,7 @@ namespace sv2 { } void run(); - CameraType getCameraType(const std::string &cameraString); + CameraType getCameraType(const std::string &camera_string); cv::Mat img_; nlohmann::json camera_matrix; diff --git a/spirecv/dataloader/CameraCppNodes/src/sv2_camera_base.cpp b/spirecv/dataloader/CameraCppNodes/src/sv2_camera_base.cpp index d7bd7d5..117f691 100644 --- a/spirecv/dataloader/CameraCppNodes/src/sv2_camera_base.cpp +++ b/spirecv/dataloader/CameraCppNodes/src/sv2_camera_base.cpp @@ -6,9 +6,6 @@ typedef unsigned char byte; namespace sv2 { - - - 
CameraBase::CameraBase(CameraType type, int id) { this->_is_running = false; @@ -135,7 +132,6 @@ bool CameraBase::isRunning() void CameraBase::openImpl() { - } void CameraBase::open(CameraType type, int id) { @@ -188,7 +184,7 @@ bool CameraBase::read(cv::Mat& image) } if (image.cols == 0 || image.rows == 0) { - throw std::runtime_error("SpireCV 2.0 (101) Camera cannot OPEN, check CAMERA_ID!"); + throw std::runtime_error("SpireCV 2.0 (101) Camera cannot OPEN, please check CAMERA_ID or CAMERA_IP!"); } return image.cols > 0 && image.rows > 0; } diff --git a/spirecv/dataloader/CameraCppNodes/src/sv2_camera_read.cpp b/spirecv/dataloader/CameraCppNodes/src/sv2_camera_read.cpp index 7143240..8a4998a 100644 --- a/spirecv/dataloader/CameraCppNodes/src/sv2_camera_read.cpp +++ b/spirecv/dataloader/CameraCppNodes/src/sv2_camera_read.cpp @@ -3,26 +3,25 @@ #include - namespace sv2 { using namespace cv; -CameraType CameraCppNode::getCameraType(const std::string &cameraString) +CameraType CameraCppNode::getCameraType(const std::string &camera_string) { std::unordered_map cameraTypeMap = { - {"NONE", CameraType::NONE}, - {"WEBCAM", CameraType::WEBCAM}, - {"V4L2CAM", CameraType::V4L2CAM}, - {"MIPI", CameraType::MIPI}, - {"RTSP", CameraType::RTSP}, - {"VIDEO", CameraType::VIDEO}, - {"G1", CameraType::G1}, - {"Q10", CameraType::Q10}, - {"GX40", CameraType::GX40}, - {"SU17", CameraType::SU17} - }; - auto it = cameraTypeMap.find(cameraString); + {"NONE", CameraType::NONE}, + {"WEBCAM", CameraType::WEBCAM}, + {"V4L2CAM", CameraType::V4L2CAM}, + {"MIPI", CameraType::MIPI}, + {"RTSP", CameraType::RTSP}, + {"VIDEO", CameraType::VIDEO}, + {"G1", CameraType::G1}, + {"Q10", CameraType::Q10}, + {"GX40", CameraType::GX40}, + {"SU17", CameraType::SU17} + }; + auto it = cameraTypeMap.find(camera_string); if (it != cameraTypeMap.end()) { return it->second; @@ -41,23 +40,21 @@ void CameraCppNode::run() auto current = std::chrono::steady_clock::now(); std::chrono::duration elapsed_seconds = current - 
start_time; if (elapsed_seconds.count() >= 1.0) { - - - nlohmann::json calib_msg = sms::def_msg("sensor_msgs::CameraCalibration"); - calib_msg["type"] = "sensor_msgs::CameraCalibration"; - calib_msg["frame_id"] = this->frame_id; - calib_msg["width"] = this->image_width; - calib_msg["height"] = this->image_height; - calib_msg["distortion_model"]= "plumb_bob"; - calib_msg["K"] = this->camera_matrix; - calib_msg["D"] = this->distortion_coefficients; - calib_msg["R"] = this->rectification; - calib_msg["P"] = this->projection; + nlohmann::json calib_msg = sms::def_msg("sensor_msgs::CameraCalibration"); + calib_msg["type"] = "sensor_msgs::CameraCalibration"; + calib_msg["frame_id"] = this->frame_id; + calib_msg["width"] = this->image_width; + calib_msg["height"] = this->image_height; + calib_msg["distortion_model"]= "plumb_bob"; + calib_msg["K"] = this->camera_matrix; + calib_msg["D"] = this->distortion_coefficients; + calib_msg["R"] = this->rectification; + calib_msg["P"] = this->projection; - // 发送sensor_msgs::CameraCalibration话题 - _calib_pub.publish(calib_msg); + // 发送sensor_msgs::CameraCalibration话题 + _calib_pub.publish(calib_msg); - start_time = current; + start_time = current; } // 发送sensor_msgs::CompressedImage话题 -- Gitee From d77eaca5aeb22e1e004101bffc2f5bacbc5146c9 Mon Sep 17 00:00:00 2001 From: Zhang__Ling Date: Mon, 23 Dec 2024 19:30:45 +0800 Subject: [PATCH 4/4] add a folder for MotCppNodes. 
--- .../algorithm/mot/{ => MultipleObjectTrackerNodes}/CMakeLists.txt | 0 .../algorithm/mot/{ => MultipleObjectTrackerNodes}/SV2MotDemo.cpp | 0 spirecv/algorithm/mot/{ => MultipleObjectTrackerNodes}/build.sh | 0 .../{ => MultipleObjectTrackerNodes}/bytetrack/BYTETracker.cpp | 0 .../mot/{ => MultipleObjectTrackerNodes}/bytetrack/BYTETracker.h | 0 .../bytetrack/BytekalmanFilter.cpp | 0 .../{ => MultipleObjectTrackerNodes}/bytetrack/BytekalmanFilter.h | 0 .../mot/{ => MultipleObjectTrackerNodes}/bytetrack/STrack.cpp | 0 .../mot/{ => MultipleObjectTrackerNodes}/bytetrack/STrack.h | 0 .../mot/{ => MultipleObjectTrackerNodes}/bytetrack/dataType.h | 0 .../mot/{ => MultipleObjectTrackerNodes}/bytetrack/lapjv.cpp | 0 .../mot/{ => MultipleObjectTrackerNodes}/bytetrack/lapjv.h | 0 .../mot/{ => MultipleObjectTrackerNodes}/bytetrack/utils.cpp | 0 .../algorithm/mot/{ => MultipleObjectTrackerNodes}/sv2_mot.cpp | 0 spirecv/algorithm/mot/{ => MultipleObjectTrackerNodes}/sv2_mot.h | 0 15 files changed, 0 insertions(+), 0 deletions(-) rename spirecv/algorithm/mot/{ => MultipleObjectTrackerNodes}/CMakeLists.txt (100%) rename spirecv/algorithm/mot/{ => MultipleObjectTrackerNodes}/SV2MotDemo.cpp (100%) rename spirecv/algorithm/mot/{ => MultipleObjectTrackerNodes}/build.sh (100%) rename spirecv/algorithm/mot/{ => MultipleObjectTrackerNodes}/bytetrack/BYTETracker.cpp (100%) rename spirecv/algorithm/mot/{ => MultipleObjectTrackerNodes}/bytetrack/BYTETracker.h (100%) rename spirecv/algorithm/mot/{ => MultipleObjectTrackerNodes}/bytetrack/BytekalmanFilter.cpp (100%) rename spirecv/algorithm/mot/{ => MultipleObjectTrackerNodes}/bytetrack/BytekalmanFilter.h (100%) rename spirecv/algorithm/mot/{ => MultipleObjectTrackerNodes}/bytetrack/STrack.cpp (100%) rename spirecv/algorithm/mot/{ => MultipleObjectTrackerNodes}/bytetrack/STrack.h (100%) rename spirecv/algorithm/mot/{ => MultipleObjectTrackerNodes}/bytetrack/dataType.h (100%) rename spirecv/algorithm/mot/{ => 
MultipleObjectTrackerNodes}/bytetrack/lapjv.cpp (100%) rename spirecv/algorithm/mot/{ => MultipleObjectTrackerNodes}/bytetrack/lapjv.h (100%) rename spirecv/algorithm/mot/{ => MultipleObjectTrackerNodes}/bytetrack/utils.cpp (100%) rename spirecv/algorithm/mot/{ => MultipleObjectTrackerNodes}/sv2_mot.cpp (100%) rename spirecv/algorithm/mot/{ => MultipleObjectTrackerNodes}/sv2_mot.h (100%) diff --git a/spirecv/algorithm/mot/CMakeLists.txt b/spirecv/algorithm/mot/MultipleObjectTrackerNodes/CMakeLists.txt similarity index 100% rename from spirecv/algorithm/mot/CMakeLists.txt rename to spirecv/algorithm/mot/MultipleObjectTrackerNodes/CMakeLists.txt diff --git a/spirecv/algorithm/mot/SV2MotDemo.cpp b/spirecv/algorithm/mot/MultipleObjectTrackerNodes/SV2MotDemo.cpp similarity index 100% rename from spirecv/algorithm/mot/SV2MotDemo.cpp rename to spirecv/algorithm/mot/MultipleObjectTrackerNodes/SV2MotDemo.cpp diff --git a/spirecv/algorithm/mot/build.sh b/spirecv/algorithm/mot/MultipleObjectTrackerNodes/build.sh similarity index 100% rename from spirecv/algorithm/mot/build.sh rename to spirecv/algorithm/mot/MultipleObjectTrackerNodes/build.sh diff --git a/spirecv/algorithm/mot/bytetrack/BYTETracker.cpp b/spirecv/algorithm/mot/MultipleObjectTrackerNodes/bytetrack/BYTETracker.cpp similarity index 100% rename from spirecv/algorithm/mot/bytetrack/BYTETracker.cpp rename to spirecv/algorithm/mot/MultipleObjectTrackerNodes/bytetrack/BYTETracker.cpp diff --git a/spirecv/algorithm/mot/bytetrack/BYTETracker.h b/spirecv/algorithm/mot/MultipleObjectTrackerNodes/bytetrack/BYTETracker.h similarity index 100% rename from spirecv/algorithm/mot/bytetrack/BYTETracker.h rename to spirecv/algorithm/mot/MultipleObjectTrackerNodes/bytetrack/BYTETracker.h diff --git a/spirecv/algorithm/mot/bytetrack/BytekalmanFilter.cpp b/spirecv/algorithm/mot/MultipleObjectTrackerNodes/bytetrack/BytekalmanFilter.cpp similarity index 100% rename from spirecv/algorithm/mot/bytetrack/BytekalmanFilter.cpp rename to 
spirecv/algorithm/mot/MultipleObjectTrackerNodes/bytetrack/BytekalmanFilter.cpp diff --git a/spirecv/algorithm/mot/bytetrack/BytekalmanFilter.h b/spirecv/algorithm/mot/MultipleObjectTrackerNodes/bytetrack/BytekalmanFilter.h similarity index 100% rename from spirecv/algorithm/mot/bytetrack/BytekalmanFilter.h rename to spirecv/algorithm/mot/MultipleObjectTrackerNodes/bytetrack/BytekalmanFilter.h diff --git a/spirecv/algorithm/mot/bytetrack/STrack.cpp b/spirecv/algorithm/mot/MultipleObjectTrackerNodes/bytetrack/STrack.cpp similarity index 100% rename from spirecv/algorithm/mot/bytetrack/STrack.cpp rename to spirecv/algorithm/mot/MultipleObjectTrackerNodes/bytetrack/STrack.cpp diff --git a/spirecv/algorithm/mot/bytetrack/STrack.h b/spirecv/algorithm/mot/MultipleObjectTrackerNodes/bytetrack/STrack.h similarity index 100% rename from spirecv/algorithm/mot/bytetrack/STrack.h rename to spirecv/algorithm/mot/MultipleObjectTrackerNodes/bytetrack/STrack.h diff --git a/spirecv/algorithm/mot/bytetrack/dataType.h b/spirecv/algorithm/mot/MultipleObjectTrackerNodes/bytetrack/dataType.h similarity index 100% rename from spirecv/algorithm/mot/bytetrack/dataType.h rename to spirecv/algorithm/mot/MultipleObjectTrackerNodes/bytetrack/dataType.h diff --git a/spirecv/algorithm/mot/bytetrack/lapjv.cpp b/spirecv/algorithm/mot/MultipleObjectTrackerNodes/bytetrack/lapjv.cpp similarity index 100% rename from spirecv/algorithm/mot/bytetrack/lapjv.cpp rename to spirecv/algorithm/mot/MultipleObjectTrackerNodes/bytetrack/lapjv.cpp diff --git a/spirecv/algorithm/mot/bytetrack/lapjv.h b/spirecv/algorithm/mot/MultipleObjectTrackerNodes/bytetrack/lapjv.h similarity index 100% rename from spirecv/algorithm/mot/bytetrack/lapjv.h rename to spirecv/algorithm/mot/MultipleObjectTrackerNodes/bytetrack/lapjv.h diff --git a/spirecv/algorithm/mot/bytetrack/utils.cpp b/spirecv/algorithm/mot/MultipleObjectTrackerNodes/bytetrack/utils.cpp similarity index 100% rename from spirecv/algorithm/mot/bytetrack/utils.cpp 
rename to spirecv/algorithm/mot/MultipleObjectTrackerNodes/bytetrack/utils.cpp diff --git a/spirecv/algorithm/mot/sv2_mot.cpp b/spirecv/algorithm/mot/MultipleObjectTrackerNodes/sv2_mot.cpp similarity index 100% rename from spirecv/algorithm/mot/sv2_mot.cpp rename to spirecv/algorithm/mot/MultipleObjectTrackerNodes/sv2_mot.cpp diff --git a/spirecv/algorithm/mot/sv2_mot.h b/spirecv/algorithm/mot/MultipleObjectTrackerNodes/sv2_mot.h similarity index 100% rename from spirecv/algorithm/mot/sv2_mot.h rename to spirecv/algorithm/mot/MultipleObjectTrackerNodes/sv2_mot.h -- Gitee