KASM-6984 Moved to a dynamic library load
This commit is contained in:
parent f64dc1a257
commit 415607ea42
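The commit drops the link-time FFmpeg dependency in favour of resolving the libav* entry points at runtime with dlopen()/dlsym() (see the new common/rfb/ffmpeg.cxx and ffmpeg.h below). As a minimal sketch of that pattern, reduced to a single libavutil entry point and independent of the KasmVNC classes, assuming a plain libavutil.so can be dlopen'd directly (the actual loader below scans /usr/lib and /usr/lib64 for a matching file name):

// Sketch only: the dlopen/dlsym pattern this commit adopts, not the KasmVNC code.
#include <dlfcn.h>
#include <stdexcept>

extern "C" {
#include <libavutil/frame.h>   // headers are still needed for types and signatures
}

using av_frame_alloc_func = AVFrame *(*)();
using av_frame_free_func = void (*)(AVFrame **);

int main() {
  // RTLD_LAZY matches what FFmpegFrameFeeder's constructor uses below.
  void *handle = dlopen("libavutil.so", RTLD_LAZY);
  if (!handle)
    throw std::runtime_error("Could not open libavutil.so");

  auto *av_frame_alloc_f =
      reinterpret_cast<av_frame_alloc_func>(dlsym(handle, "av_frame_alloc"));
  auto *av_frame_free_f =
      reinterpret_cast<av_frame_free_func>(dlsym(handle, "av_frame_free"));
  if (!av_frame_alloc_f || !av_frame_free_f)
    throw std::runtime_error("Failed to load symbol");

  AVFrame *frame = av_frame_alloc_f();   // call through the looked-up pointers
  av_frame_free_f(&frame);

  dlclose(handle);
  return 0;
}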
@@ -68,6 +68,7 @@ set(RFB_SOURCES
  encodings.cxx
  util.cxx
  xxhash.c
  ffmpeg.cxx
)

if (UNIX)
@@ -143,7 +144,7 @@ target_include_directories(rfb PRIVATE
  ${FFMPEG_INCLUDE_DIRS}
)

target_link_libraries(rfb PRIVATE ${RFB_LIBRARIES} tinyxml2_objs ${FFMPEG_LIBRARIES})
target_link_libraries(rfb PRIVATE ${RFB_LIBRARIES} tinyxml2_objs)

if (UNIX)
  libtool_create_control_file(rfb)
@@ -48,8 +48,8 @@
// otherwise blacklisted connections might be "forgotten".

#include <assert.h>
#include <stdlib.h>
#include <cassert>
#include <cstdlib>

#include <network/GetAPI.h>
#include <network/Udp.h>
@@ -74,6 +74,7 @@
#include <unistd.h>
#include <wordexp.h>
#include <filesystem>
#include <string_view>

using namespace rfb;

@@ -83,7 +84,7 @@ EncCache VNCServerST::encCache;

void SelfBench();

void benchmark(const std::string&, const std::string&);
void benchmark(std::string_view, std::string_view);

//
// -=- VNCServerST Implementation
@@ -1,4 +1,6 @@
/* Copyright (C) 2025 Kasm Technologies Corp
/* Copyright 2015 Pierre Ossman <ossman@cendio.se> for Cendio AB
 * Copyright (C) 2015 D. R. Commander. All Rights Reserved.
 * Copyright (C) 2025 Kasm Technologies Corp
 *
 * This is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
@@ -17,16 +19,253 @@
 */

#include "benchmark.h"
#include <string>
#include <stdexcept>
#include <string_view>
#include <rfb/LogWriter.h>
#include <numeric>
#include <tinyxml2.h>
#include <algorithm>
#include <cassert>

#include "ServerCore.h"
#include <cmath>

#include "EncCache.h"
#include "EncodeManager.h"
#include "SConnection.h"
#include "screenTypes.h"
#include "SMsgWriter.h"
#include "UpdateTracker.h"
#include "rdr/BufferedInStream.h"
#include "rdr/OutStream.h"
#include "ffmpeg.h"

namespace benchmarking {
  class MockBufferStream final : public rdr::BufferedInStream {
    bool fillBuffer(size_t maxSize, bool wait) override {
      return true;
    }
  };

  class MockStream final : public rdr::OutStream {
  public:
    MockStream() {
      offset = 0;
      ptr = buf;
      end = buf + sizeof(buf);
    }

  private:
    void overrun(size_t needed) override {
      assert(end >= ptr);
      if (needed > static_cast<size_t>(end - ptr))
        flush();
    }

  public:
    size_t length() override {
      flush();
      return offset;
    }

    void flush() override {
      offset += ptr - buf;
      ptr = buf;
    }

  private:
    ptrdiff_t offset;
    rdr::U8 buf[8192]{};
  };

  class MockSConnection final : public rfb::SConnection {
  public:
    MockSConnection() {
      setStreams(nullptr, &out);

      setWriter(new rfb::SMsgWriter(&cp, &out, &udps));
    }

    ~MockSConnection() override = default;

    void writeUpdate(const rfb::UpdateInfo &ui, const rfb::PixelBuffer *pb) {
      cache.clear();

      manager.clearEncodingTime();
      if (!ui.is_empty()) {
        manager.writeUpdate(ui, pb, nullptr);
      } else {
        rfb::Region region{pb->getRect()};
        manager.writeLosslessRefresh(region, pb, nullptr, 2000);
      }
    }

    void setDesktopSize(int fb_width, int fb_height,
                        const rfb::ScreenSet &layout) override {
      cp.width = fb_width;
      cp.height = fb_height;
      cp.screenLayout = layout;

      writer()->writeExtendedDesktopSize(rfb::reasonServer, 0, cp.width, cp.height,
                                         cp.screenLayout);
    }

    void sendStats(const bool toClient) override {
    }

    [[nodiscard]] bool canChangeKasmSettings() const override {
      return false;
    }

    void udpUpgrade(const char *resp) override {
    }

    void udpDowngrade(const bool) override {
    }

    void subscribeUnixRelay(const char *name) override {
    }

    void unixRelay(const char *name, const rdr::U8 *buf, const unsigned len) override {
    }

    void handleFrameStats(rdr::U32 all, rdr::U32 render) override {
    }

    [[nodiscard]] auto getJpegStats() const {
      return manager.jpegstats;
    }

    [[nodiscard]] auto getWebPStats() const {
      return manager.webpstats;
    }

    [[nodiscard]] auto bytes() { return out.length(); }
    [[nodiscard]] auto udp_bytes() { return udps.length(); }

  protected:
    MockStream out{};
    MockStream udps{};

    EncCache cache{};
    EncodeManager manager{this, &cache};
  };

  class MockCConnection final : public MockTestConnection {
  public:
    explicit MockCConnection(const std::vector<rdr::S32> &encodings, rfb::ManagedPixelBuffer *pb) {
      setStreams(&in, nullptr);

      // Need to skip the initial handshake and ServerInit
      setState(RFBSTATE_NORMAL);
      // That also means that the reader and writer weren't set up
      setReader(new rfb::CMsgReader(this, &in));
      auto &pf = pb->getPF();
      CMsgHandler::setPixelFormat(pf);

      MockCConnection::setDesktopSize(pb->width(), pb->height());

      cp.setPF(pf);

      sc.cp.setPF(pf);
      sc.setEncodings(std::size(encodings), encodings.data());

      setFramebuffer(pb);
    }

    void setCursor(int width, int height, const rfb::Point &hotspot, const rdr::U8 *data,
                   const bool resizing) override {
    }

    ~MockCConnection() override = default;

    struct stats_t {
      EncodeManager::codecstats_t jpeg_stats;
      EncodeManager::codecstats_t webp_stats;
      uint64_t bytes;
      uint64_t udp_bytes;
    };

    [[nodiscard]] stats_t getStats() {
      return {
        sc.getJpegStats(),
        sc.getWebPStats(),
        sc.bytes(),
        sc.udp_bytes()
      };
    }

    void setDesktopSize(int w, int h) override {
      CConnection::setDesktopSize(w, h);

      if (screen_layout.num_screens())
        screen_layout.remove_screen(0);

      screen_layout.add_screen(rfb::Screen(0, 0, 0, w, h, 0));
    }

    void setNewFrame(const AVFrame *frame) override {
      auto *pb = getFramebuffer();
      const int width = pb->width();
      const int height = pb->height();
      const rfb::Rect rect(0, 0, width, height);

      int dstStride{};
      auto *buffer = pb->getBufferRW(rect, &dstStride);

      const rfb::PixelFormat &pf = pb->getPF();

      // Source data and stride from FFmpeg
      const auto *srcData = frame->data[0];
      const int srcStride = frame->linesize[0] / 3; // Convert bytes to pixels

      // Convert from the RGB format to the PixelBuffer's format
      pf.bufferFromRGB(buffer, srcData, width, srcStride, height);

      // Commit changes
      pb->commitBufferRW(rect);
    }

    void framebufferUpdateStart() override {
      updates.clear();
    }

    void framebufferUpdateEnd() override {
      const rfb::PixelBuffer *pb = getFramebuffer();

      rfb::UpdateInfo ui;
      const rfb::Region clip(pb->getRect());

      updates.add_changed(pb->getRect());

      updates.getUpdateInfo(&ui, clip);
      sc.writeUpdate(ui, pb);
    }

    void dataRect(const rfb::Rect &r, int encoding) override {
    }

    void setColourMapEntries(int, int, rdr::U16 *) override {
    }

    void bell() override {
    }

    void serverCutText(const char *, rdr::U32) override {
    }

    void serverCutText(const char *str) override {
    }

  protected:
    MockBufferStream in;
    rfb::ScreenSet screen_layout;
    rfb::SimpleUpdateTracker updates;
    MockSConnection sc;
  };
}

void report(std::vector<uint64_t> &totals, std::vector<uint64_t> &timings,
            std::vector<rfb::MockCConnection::stats_t> &stats, const std::string &results_file) {
            std::vector<benchmarking::MockCConnection::stats_t> &stats, const std::string_view results_file) {
  auto totals_sum = std::accumulate(totals.begin(), totals.end(), 0.);
  auto totals_avg = totals_sum / static_cast<double>(totals.size());

@@ -109,148 +348,52 @@ void report(std::vector<uint64_t> &totals, std::vector<uint64_t> &timings,

  add_benchmark_item("Data sent, KBs", 0, bytes / 1024);

  doc.SaveFile(results_file.c_str());
  doc.SaveFile(results_file.data());
}

void benchmark(const std::string &path, const std::string &results_file) {
  AVFormatContext *format_ctx = nullptr;
void benchmark(std::string_view path, const std::string_view results_file) {
  try {
    vlog.info("Benchmarking with video file %s", path.data());
    FFmpegFrameFeeder frame_feeder{};
    frame_feeder.open(path);

  vlog.info("Benchmarking with video file %s", path.c_str());
    static const rfb::PixelFormat pf{32, 24, false, true, 0xFF, 0xFF, 0xFF, 0, 8, 16};
    const std::vector<rdr::S32> encodings{
      std::begin(benchmarking::default_encodings), std::end(benchmarking::default_encodings)
    };

  if (avformat_open_input(&format_ctx, path.c_str(), nullptr, nullptr) < 0)
    throw std::runtime_error("Could not open video file");
    // if (rfb::Server::WebPEnabled)
    // encodings.push_back(rfb::pseudoEncodingWEBP);

  FormatCtxGuard format_ctx_guard{format_ctx};
    constexpr auto runs = 20;
    std::vector<uint64_t> totals(runs, 0);
    std::vector<benchmarking::MockCConnection::stats_t> stats(runs);
    std::vector<uint64_t> timings{};
    auto [width, height] = frame_feeder.get_frame_dimensions();

  // Find stream info
  if (avformat_find_stream_info(format_ctx, nullptr) < 0)
    throw std::runtime_error("Could not find stream info");
    for (int run = 0; run < runs; ++run) {
      auto *pb = new rfb::ManagedPixelBuffer{pf, width, height};
      benchmarking::MockCConnection connection{encodings, pb};

  // Find video stream
  int video_stream_idx = -1;
  for (uint32_t i = 0; i < format_ctx->nb_streams; ++i) {
    if (format_ctx->streams[i]->codecpar->codec_type == AVMEDIA_TYPE_VIDEO) {
      video_stream_idx = static_cast<int>(i);
      break;
      vlog.info("RUN %d. Reading frames...", run);
      auto play_stats = frame_feeder.play(&connection);
      vlog.info("RUN %d. Done reading frames...", run);

      timings.insert(timings.end(), play_stats.timings.begin(), play_stats.timings.end());

      totals[run] = play_stats.total;
      stats[run] = connection.getStats();
      vlog.info("JPEG stats: %u ms", stats[run].jpeg_stats.ms);
      vlog.info("WebP stats: %u ms", stats[run].webp_stats.ms);
      vlog.info("RUN %d. Bytes sent %lu..", run, stats[run].bytes);
    }

    if (!timings.empty())
      report(totals, timings, stats, results_file);

    exit(0);
  } catch (std::exception &e) {
    vlog.error("Benchmarking failed: %s", e.what());
    exit(1);
  }

  if (video_stream_idx == -1)
    throw std::runtime_error("No video stream found");

  // Get codec parameters and decoder
  const auto *codec_parameters = format_ctx->streams[video_stream_idx]->codecpar;
  const auto *codec = avcodec_find_decoder(codec_parameters->codec_id);
  if (!codec)
    throw std::runtime_error("Codec not found");

  const CodecCtxGuard codex_ctx_guard{avcodec_alloc_context3(codec)};
  auto *codec_ctx = codex_ctx_guard.get();

  if (!codec_ctx || avcodec_parameters_to_context(codec_ctx, codec_parameters) < 0)
    throw std::runtime_error("Failed to set up codec context");

  if (avcodec_open2(codec_ctx, codec, nullptr) < 0)
    throw std::runtime_error("Could not open codec");

  // Allocate frame and packet
  const FrameGuard frame_guard{av_frame_alloc()};
  auto *frame = frame_guard.get();

  const PacketGuard packet_guard{av_packet_alloc()};
  auto *packet = packet_guard.get();

  if (!frame || !packet)
    throw std::runtime_error("Could not allocate frame or packet");

  // Scaling context to convert to RGB24
  SwsContext *sws_ctx = sws_getContext(
    codec_ctx->width, codec_ctx->height, codec_ctx->pix_fmt,
    codec_ctx->width, codec_ctx->height, AV_PIX_FMT_RGB24,
    SWS_BILINEAR, nullptr, nullptr, nullptr
  );

  if (!sws_ctx)
    throw std::runtime_error("Could not create scaling context");

  SwsContextGuard sws_ctx_guard{sws_ctx};

  const FrameGuard rgb_frame_guard{av_frame_alloc()};
  auto *rgb_frame = rgb_frame_guard.get();

  if (!rgb_frame)
    throw std::runtime_error("Could not allocate frame");

  rgb_frame->format = AV_PIX_FMT_RGB24;
  rgb_frame->width = codec_ctx->width;
  rgb_frame->height = codec_ctx->height;

  static const rfb::PixelFormat pf{32, 24, false, true, 0xFF, 0xFF, 0xFF, 0, 8, 16};
  std::vector<rdr::S32> encodings{std::begin(rfb::default_encodings), std::end(rfb::default_encodings)};

  // if (rfb::Server::WebPEnabled)
  // encodings.push_back(rfb::pseudoEncodingWEBP);

  if (av_frame_get_buffer(rgb_frame, 0) != 0)
    throw std::runtime_error("Could not allocate frame data");

  constexpr auto runs = 20;
  std::vector<uint64_t> totals(runs, 0);
  std::vector<rfb::MockCConnection::stats_t> stats(runs);
  const size_t total_frame_count = format_ctx->streams[video_stream_idx]->nb_frames;
  std::vector<uint64_t> timings(total_frame_count > 0 ? total_frame_count * runs : 2048, 0);
  uint64_t frames{};

  for (int run = 0; run < runs; ++run) {
    auto *pb = new rfb::ManagedPixelBuffer{pf, rgb_frame->width, rgb_frame->height};
    rfb::MockCConnection connection{encodings, pb};

    uint64_t total{};

    vlog.info("RUN %d. Reading frames...", run);
    while (av_read_frame(format_ctx, packet) == 0) {
      if (packet->stream_index == video_stream_idx) {
        if (avcodec_send_packet(codec_ctx, packet) == 0) {
          while (avcodec_receive_frame(codec_ctx, frame) == 0) {
            // Convert to RGB
            sws_scale(sws_ctx, frame->data, frame->linesize, 0, frame->height,
                      rgb_frame->data, rgb_frame->linesize);

            connection.framebufferUpdateStart();
            connection.setNewFrame(rgb_frame);
            using namespace std::chrono;

            auto now = high_resolution_clock::now();
            connection.framebufferUpdateEnd();
            const auto duration = duration_cast<milliseconds>(high_resolution_clock::now() - now).count();

            //vlog.info("Frame took %lu ms", duration);

            timings[frames++] = duration;
            total += duration;
          }
        }
      }
      av_packet_unref(packet);
    }
    vlog.info("RUN %d. Done reading frames...", run);

    if (av_seek_frame(format_ctx, video_stream_idx, 0, AVSEEK_FLAG_BACKWARD) < 0)
      throw std::runtime_error("Could not seek to start of video");

    avcodec_flush_buffers(codec_ctx);

    totals[run] = total;
    stats[run] = connection.getStats();
    vlog.info("JPEG stats: %u ms", stats[run].jpeg_stats.ms);
    vlog.info("WebP stats: %u ms", stats[run].webp_stats.ms);
    vlog.info("RUN %d. Bytes sent %lu..", run, stats[run].bytes);
  }

  if (frames > 0)
    report(totals, timings, stats, results_file);

  avcodec_close(codec_ctx);

  exit(0);
}
@@ -1,7 +1,5 @@
/* Copyright 2015 Pierre Ossman <ossman@cendio.se> for Cendio AB
 * Copyright (C) 2015 D. R. Commander. All Rights Reserved.
 * Copyright (C) 2025 Kasm Technologies Corp
 *
/* Copyright (C) 2025 Kasm Technologies Corp
 *
 * This is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
@@ -20,22 +18,24 @@

#pragma once

#include <cassert>
#include <rdr/FileInStream.h>
#include <rfb/VNCServer.h>

#include "CConnection.h"
#include "CMsgReader.h"
#include "EncCache.h"
#include "EncodeManager.h"
#include "LogWriter.h"
#include "screenTypes.h"
#include "SMsgWriter.h"
#include "ffmpeg.h"

extern "C" {
#include <libavutil/frame.h>
}

static rfb::LogWriter vlog("Benchmarking");

namespace rfb {
namespace benchmarking {
  using namespace rfb;

  class MockTestConnection : public CConnection {
  public:
    virtual void setNewFrame(const AVFrame *frame) = 0;
  };

  static constexpr rdr::S32 default_encodings[] = {
    encodingTight,
    encodingZRLE,
@@ -49,226 +49,4 @@ namespace rfb {
    //pseudoEncodingWEBP
    //pseudoEncodingQOI
  };

  class MockBufferStream final : public rdr::BufferedInStream {
    bool fillBuffer(size_t maxSize, bool wait) override {
      return true;
    }
  };

  class MockStream final : public rdr::OutStream {
  public:
    MockStream() {
      offset = 0;
      ptr = buf;
      end = buf + sizeof(buf);
    }

  private:
    void overrun(size_t needed) override {
      assert(end >= ptr);
      if (needed > static_cast<size_t>(end - ptr))
        flush();
    }

  public:
    size_t length() override {
      flush();
      return offset;
    }

    void flush() override {
      offset += ptr - buf;
      ptr = buf;
    }

  private:
    ptrdiff_t offset;
    rdr::U8 buf[8192]{};
  };

  class MockSConnection final : public SConnection {
  public:
    MockSConnection() {
      setStreams(nullptr, &out);

      setWriter(new SMsgWriter(&cp, &out, &udps));
    }

    ~MockSConnection() override = default;

    void writeUpdate(const UpdateInfo &ui, const PixelBuffer *pb) {
      cache.clear();

      manager.clearEncodingTime();
      if (!ui.is_empty()) {
        manager.writeUpdate(ui, pb, nullptr);
      } else {
        Region region{pb->getRect()};
        manager.writeLosslessRefresh(region, pb, nullptr, 2000);
      }
    }

    void setDesktopSize(int fb_width, int fb_height,
                        const ScreenSet &layout) override {
      cp.width = fb_width;
      cp.height = fb_height;
      cp.screenLayout = layout;

      writer()->writeExtendedDesktopSize(reasonServer, 0, cp.width, cp.height,
                                         cp.screenLayout);
    }

    void sendStats(const bool toClient) override {
    }

    [[nodiscard]] bool canChangeKasmSettings() const override {
      return false;
    }

    void udpUpgrade(const char *resp) override {
    }

    void udpDowngrade(const bool) override {
    }

    void subscribeUnixRelay(const char *name) override {
    }

    void unixRelay(const char *name, const rdr::U8 *buf, const unsigned len) override {
    }

    void handleFrameStats(rdr::U32 all, rdr::U32 render) override {
    }

    [[nodiscard]] auto getJpegStats() const {
      return manager.jpegstats;
    }

    [[nodiscard]] auto getWebPStats() const {
      return manager.webpstats;
    }

    [[nodiscard]] auto bytes() { return out.length(); }
    [[nodiscard]] auto udp_bytes() { return udps.length(); }

  protected:
    MockStream out{};
    MockStream udps{};

    EncCache cache{};
    EncodeManager manager{this, &cache};
  };

  class MockCConnection final : public CConnection {
  public:
    explicit MockCConnection(const std::vector<rdr::S32>& encodings, ManagedPixelBuffer *pb) {
      setStreams(&in, nullptr);

      // Need to skip the initial handshake and ServerInit
      setState(RFBSTATE_NORMAL);
      // That also means that the reader and writer weren't set up
      setReader(new rfb::CMsgReader(this, &in));
      auto &pf = pb->getPF();
      CMsgHandler::setPixelFormat(pf);

      MockCConnection::setDesktopSize(pb->width(), pb->height());

      cp.setPF(pf);

      sc.cp.setPF(pf);
      sc.setEncodings(std::size(encodings), encodings.data());

      setFramebuffer(pb);
    }

    void setCursor(int width, int height, const Point &hotspot, const rdr::U8 *data, const bool resizing) override {
    }

    ~MockCConnection() override = default;

    struct stats_t {
      EncodeManager::codecstats_t jpeg_stats;
      EncodeManager::codecstats_t webp_stats;
      uint64_t bytes;
      uint64_t udp_bytes;
    };

    [[nodiscard]] stats_t getStats() {
      return {
        sc.getJpegStats(),
        sc.getWebPStats(),
        sc.bytes(),
        sc.udp_bytes()
      };
    }

    void setDesktopSize(int w, int h) override {
      CConnection::setDesktopSize(w, h);

      if (screen_layout.num_screens())
        screen_layout.remove_screen(0);

      screen_layout.add_screen(Screen(0, 0, 0, w, h, 0));
    }

    void setNewFrame(const AVFrame *frame) {
      auto *pb = getFramebuffer();
      const int width = pb->width();
      const int height = pb->height();
      const rfb::Rect rect(0, 0, width, height);

      int dstStride{};
      auto *buffer = pb->getBufferRW(rect, &dstStride);

      const PixelFormat &pf = pb->getPF();

      // Source data and stride from FFmpeg
      const auto *srcData = frame->data[0];
      const int srcStride = frame->linesize[0] / 3; // Convert bytes to pixels

      // Convert from the RGB format to the PixelBuffer's format
      pf.bufferFromRGB(buffer, srcData, width, srcStride, height);

      // Commit changes
      pb->commitBufferRW(rect);
    }

    void framebufferUpdateStart() override {
      updates.clear();
    }

    void framebufferUpdateEnd() override {
      const PixelBuffer *pb = getFramebuffer();

      UpdateInfo ui;
      const Region clip(pb->getRect());

      updates.add_changed(pb->getRect());

      updates.getUpdateInfo(&ui, clip);
      sc.writeUpdate(ui, pb);
    }

    void dataRect(const Rect &r, int encoding) override {
    }

    void setColourMapEntries(int, int, rdr::U16 *) override {
    }

    void bell() override {
    }

    void serverCutText(const char *, rdr::U32) override {
    }

    void serverCutText(const char *str) override {
    }

  protected:
    MockBufferStream in;
    ScreenSet screen_layout;
    SimpleUpdateTracker updates;
    MockSConnection sc;
  };
}
common/rfb/ffmpeg.cxx (new file, 210 lines)
@@ -0,0 +1,210 @@
/* Copyright (C) 2025 Kasm Technologies Corp
 *
 * This is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This software is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this software; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307,
 * USA.
 */

#include "ffmpeg.h"
#include <array>
#include <string_view>
#include <filesystem>

FFmpegFrameFeeder::FFmpegFrameFeeder() {
  static constexpr std::array<std::string_view, 2> paths = {
    "/usr/lib/",
    "/usr/lib64"
  };

  namespace fs = std::filesystem;
  using namespace std::string_literals;

  auto load_lib = [](auto *lib) {
    void *handle{};
    for (const auto &dir: paths) {
      if (!fs::exists(dir) || !fs::is_directory(dir))
        continue;

      for (const auto &entry: fs::recursive_directory_iterator(dir)) {
        if (!entry.is_regular_file())
          continue;

        const std::string filename = entry.path().filename().string();
        if (filename.find(lib) != std::string::npos) {
          handle = dlopen(filename.c_str(), RTLD_LAZY);

          break;
        }
      }
    }

    if (!handle)
      throw std::runtime_error("Could not open "s + lib);

    return DlHandlerGuard{handle};
  };

  // libavformat
  libavformat = load_lib("libavformat.so");
  auto handle = libavformat.get();

  avformat_open_input_f = D_LOOKUP_SYM(handle, avformat_open_input);
  avformat_find_stream_info_f = D_LOOKUP_SYM(handle, avformat_find_stream_info);
  avcodec_find_decoder_f = D_LOOKUP_SYM(handle, avcodec_find_decoder);
  avcodec_parameters_to_context_f = D_LOOKUP_SYM(handle, avcodec_parameters_to_context);
  av_read_frame_f = D_LOOKUP_SYM(handle, av_read_frame);
  av_seek_frame_f = D_LOOKUP_SYM(handle, av_seek_frame);
  avformat_close_input_f = D_LOOKUP_SYM(handle, avformat_close_input);

  vlog.info("libavformat.so loaded");

  // libavutil
  libavutil = load_lib("libavutil.so");
  handle = libavutil.get();

  av_frame_free_f = D_LOOKUP_SYM(handle, av_frame_free);
  av_frame_alloc_f = D_LOOKUP_SYM(handle, av_frame_alloc);
  av_frame_get_buffer_f = D_LOOKUP_SYM(handle, av_frame_get_buffer);

  vlog.info("libavutil.so loaded");

  // libswscale
  libswscale = load_lib("libswscale.so");
  handle = libswscale.get();

  sws_freeContext_f = D_LOOKUP_SYM(handle, sws_freeContext);
  sws_getContext_f = D_LOOKUP_SYM(handle, sws_getContext);
  sws_scale_f = D_LOOKUP_SYM(handle, sws_scale);

  // libavcodec
  libavcodec = load_lib("libavcodec.so");
  handle = libavcodec.get();

  avcodec_open2_f = D_LOOKUP_SYM(handle, avcodec_open2);
  avcodec_alloc_context3_f = D_LOOKUP_SYM(handle, avcodec_alloc_context3);
  avcodec_send_packet_f = D_LOOKUP_SYM(handle, avcodec_send_packet);
  avcodec_receive_frame_f = D_LOOKUP_SYM(handle, avcodec_receive_frame);
  av_packet_unref_f = D_LOOKUP_SYM(handle, av_packet_unref);
  avcodec_flush_buffers_f = D_LOOKUP_SYM(handle, avcodec_flush_buffers);
  avcodec_close_f = D_LOOKUP_SYM(handle, avcodec_close);
  av_packet_alloc_f = D_LOOKUP_SYM(handle, av_packet_alloc);
  av_packet_free_f = D_LOOKUP_SYM(handle, av_packet_free);
}

FFmpegFrameFeeder::~FFmpegFrameFeeder() {
  avformat_close_input_f(&format_ctx);
  avcodec_close_f(codec_ctx);
  avcodec_free_context_f(&codec_ctx);
}

void FFmpegFrameFeeder::open(const std::string_view path) {
  if (avformat_open_input_f(&format_ctx, path.data(), nullptr, nullptr) < 0)
    throw std::runtime_error("Could not open video file");

  // Find stream info
  if (avformat_find_stream_info_f(format_ctx, nullptr) < 0)
    throw std::runtime_error("Could not find stream info");

  // Find video stream
  for (uint32_t i = 0; i < format_ctx->nb_streams; ++i) {
    if (format_ctx->streams[i]->codecpar->codec_type == AVMEDIA_TYPE_VIDEO) {
      video_stream_idx = static_cast<int>(i);
      break;
    }
  }

  if (video_stream_idx == -1)
    throw std::runtime_error("No video stream found");

  // Get codec parameters and decoder
  const auto *codec_parameters = format_ctx->streams[video_stream_idx]->codecpar;
  const auto *codec = avcodec_find_decoder_f(codec_parameters->codec_id);
  if (!codec)
    throw std::runtime_error("Codec not found");

  codec_ctx = avcodec_alloc_context3_f(codec);
  if (!codec_ctx || avcodec_parameters_to_context_f(codec_ctx, codec_parameters) < 0)
    throw std::runtime_error("Failed to set up codec context");

  if (avcodec_open2_f(codec_ctx, codec, nullptr) < 0)
    throw std::runtime_error("Could not open codec");
}

FFmpegFrameFeeder::play_stats_t FFmpegFrameFeeder::play(benchmarking::MockTestConnection *connection) const {
  // Allocate frame and packet
  const FrameGuard frame{av_frame_alloc_f()};
  const PacketGuard packet{av_packet_alloc_f()};

  if (!frame || !packet)
    throw std::runtime_error("Could not allocate frame or packet");

  // Scaling context to convert to RGB24
  SwsContext *sws_ctx = sws_getContext_f(
    codec_ctx->width, codec_ctx->height, codec_ctx->pix_fmt,
    codec_ctx->width, codec_ctx->height, AV_PIX_FMT_RGB24,
    SWS_BILINEAR, nullptr, nullptr, nullptr
  );
  if (!sws_ctx)
    throw std::runtime_error("Could not create scaling context");

  const std::unique_ptr<SwsContext, void(*)(SwsContext *)> sws_ctx_guard{sws_ctx, sws_freeContext_f};

  const FrameGuard rgb_frame{av_frame_alloc_f()};
  if (!rgb_frame)
    throw std::runtime_error("Could not allocate frame");

  rgb_frame->format = AV_PIX_FMT_RGB24;
  rgb_frame->width = codec_ctx->width;
  rgb_frame->height = codec_ctx->height;

  if (av_frame_get_buffer_f(rgb_frame.get(), 0) != 0)
    throw std::runtime_error("Could not allocate frame data");

  play_stats_t stats{};
  const auto total_frame_count = get_total_frame_count();
  stats.timings.reserve(total_frame_count > 0 ? total_frame_count : 2048);

  while (av_read_frame_f(format_ctx, packet.get()) == 0) {
    if (packet->stream_index == video_stream_idx) {
      if (avcodec_send_packet_f(codec_ctx, packet.get()) == 0) {
        while (avcodec_receive_frame_f(codec_ctx, frame.get()) == 0) {
          // Convert to RGB
          sws_scale_f(sws_ctx_guard.get(), frame->data, frame->linesize, 0,
                      frame->height,
                      rgb_frame->data, rgb_frame->linesize);

          connection->framebufferUpdateStart();
          connection->setNewFrame(rgb_frame.get());
          using namespace std::chrono;

          auto now = high_resolution_clock::now();
          connection->framebufferUpdateEnd();
          const auto duration = duration_cast<milliseconds>(high_resolution_clock::now() - now).count();

          //vlog.info("Frame took %lu ms", duration);
          stats.total += duration;
          stats.timings.push_back(duration);
        }
      }
    }
    av_packet_unref_f(packet.get());
  }

  if (av_seek_frame_f(format_ctx, video_stream_idx, 0, AVSEEK_FLAG_BACKWARD) < 0)
    throw std::runtime_error("Could not seek to start of video");

  avcodec_flush_buffers_f(codec_ctx);

  return stats;
}
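For orientation, a condensed usage sketch of FFmpegFrameFeeder, mirroring the calls visible in the benchmark.cxx hunk earlier in this commit; the surrounding names (path, pf, encodings, totals, run) come from that function, and error handling and the XML report are omitted:

// Condensed sketch, not a complete translation of benchmark.cxx.
FFmpegFrameFeeder frame_feeder{};
frame_feeder.open(path);   // resolves and calls the dlopen'd avformat/avcodec entry points

auto [width, height] = frame_feeder.get_frame_dimensions();
auto *pb = new rfb::ManagedPixelBuffer{pf, width, height};
benchmarking::MockCConnection connection{encodings, pb};

// play() decodes every frame, pushes it through the mock connection and
// returns the per-frame encode timings plus their total.
auto play_stats = frame_feeder.play(&connection);
totals[run] = play_stats.total;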
@@ -18,7 +18,10 @@

#pragma once

#include <dlfcn.h>
#include <memory>
#include <string>
#include "LogWriter.h"

extern "C" {
#include <libavcodec/avcodec.h>
@@ -26,38 +29,140 @@ extern "C" {
#include <libswscale/swscale.h>
}

struct AVFormatContextDeleter {
  void operator()(AVFormatContext *ctx) const {
    avformat_close_input(&ctx);
  }
};
#include "benchmark.h"

struct AVCodecContextDeleter {
  void operator()(AVCodecContext *ctx) const {
    avcodec_free_context(&ctx);
  }
};
#define STR_HELPER(x) #x
#define STR(x) STR_HELPER(x)
#define CONCAT_STR(a, b) a b

struct AVFrameDeleter {
  void operator()(AVFrame *frame) const {
    av_frame_free(&frame);
  }
};
#define D_LOOKUP_SYM(handle, name) \
  [](auto handle, auto *sym_name) -> auto { \
    auto *sym = reinterpret_cast<name##_func>(dlsym(handle, sym_name)); \
    if (!sym) \
      throw std::runtime_error("Failed to load symbol "s + sym_name); \
    return sym; \
  }(handle, STR(name))

struct SwsContextDeleter {
  void operator()(SwsContext *ctx) const {
    sws_freeContext(ctx);
  }
};
#define DEFINE_GUARD(name, type, deleter) \
  using name##Guard = std::unique_ptr<type, decltype([](auto *ptr){deleter##_f(&ptr);})>;

struct PacketDeleter {
  void operator()(AVPacket *packet) const {
    av_packet_free(&packet);
  }
};
//using SwsContextGuard = std::unique_ptr<SwsContext, SwsContextDeleter>;

using FormatCtxGuard = std::unique_ptr<AVFormatContext, AVFormatContextDeleter>;
using CodecCtxGuard = std::unique_ptr<AVCodecContext, AVCodecContextDeleter>;
using FrameGuard = std::unique_ptr<AVFrame, AVFrameDeleter>;
using SwsContextGuard = std::unique_ptr<SwsContext, SwsContextDeleter>;
using PacketGuard = std::unique_ptr<AVPacket, PacketDeleter>;
class FFmpegFrameFeeder final {
  // libavformat
  using avformat_close_input_func = void(*)(AVFormatContext **);
  using avformat_open_input_func = int(*)(AVFormatContext **ps, const char *url, const AVInputFormat *fmt,
                                          AVDictionary **options);
  using avformat_find_stream_info_func = int (*)(AVFormatContext *ic, AVDictionary **options);
  using av_read_frame_func = int (*)(AVFormatContext *s, AVPacket *pkt);
  using av_seek_frame_func = int (*)(AVFormatContext *s, int stream_index, int64_t timestamp, int flags);

  // libavutil
  using av_frame_free_func = void (*)(AVFrame **);
  using av_frame_alloc_func = AVFrame *(*)();
  using av_frame_get_buffer_func = int (*)(AVFrame *frame, int align);

  // libswscale
  using sws_freeContext_func = void (*)(SwsContext *);
  using sws_getContext_func = SwsContext * (*)(int srcW, int srcH, AVPixelFormat srcFormat, int dstW, int dstH,
                                               AVPixelFormat dstFormat, int flags, SwsFilter *srcFilter,
                                               SwsFilter *dstFilter, const double *param);

  using sws_scale_func = int(*)(SwsContext *c, const uint8_t *const srcSlice[], const int srcStride[], int srcSliceY,
                                int srcSliceH, uint8_t *const dst[], const int dstStride[]);

  // libavcodec
  using avcodec_free_context_func = void (*)(AVCodecContext **);
  using av_packet_free_func = void (*)(AVPacket **);
  using avcodec_find_decoder_func = const AVCodec * (*)(AVCodecID id);
  using avcodec_alloc_context3_func = AVCodecContext* (*)(const AVCodec *codec);
  using avcodec_parameters_to_context_func = int (*)(AVCodecContext *codec, const AVCodecParameters *par);
  using avcodec_open2_func = int (*)(AVCodecContext *avctx, const AVCodec *codec, AVDictionary **options);
  using av_packet_alloc_func = AVPacket *(*)();
  using avcodec_send_packet_func = int(*)(AVCodecContext *avctx, const AVPacket *avpkt);
  using avcodec_receive_frame_func = int(*)(AVCodecContext *avctx, AVFrame *frame);
  using av_packet_unref_func = void (*)(AVPacket *pkt);
  using avcodec_flush_buffers_func = void (*)(AVCodecContext *avctx);
  using avcodec_close_func = int (*)(AVCodecContext *avctx);

  struct DlHandler {
    void operator()(void *handle) const {
      dlclose(handle);
    }
  };

  using DlHandlerGuard = std::unique_ptr<void, DlHandler>;

  // libavformat
  avformat_close_input_func avformat_close_input_f{};
  avformat_open_input_func avformat_open_input_f{};
  avformat_find_stream_info_func avformat_find_stream_info_f{};
  av_read_frame_func av_read_frame_f{};
  av_seek_frame_func av_seek_frame_f{};

  // libavutil
  static inline av_frame_free_func av_frame_free_f{};
  av_frame_alloc_func av_frame_alloc_f{};
  av_frame_get_buffer_func av_frame_get_buffer_f{};

  // libswscale
  sws_freeContext_func sws_freeContext_f{};
  sws_getContext_func sws_getContext_f{};
  sws_scale_func sws_scale_f{};

  // libavcodec
  avcodec_free_context_func avcodec_free_context_f{};
  static inline av_packet_free_func av_packet_free_f{};
  avcodec_find_decoder_func avcodec_find_decoder_f{};
  avcodec_alloc_context3_func avcodec_alloc_context3_f{};
  avcodec_parameters_to_context_func avcodec_parameters_to_context_f{};
  avcodec_open2_func avcodec_open2_f{};
  av_packet_alloc_func av_packet_alloc_f{};
  avcodec_send_packet_func avcodec_send_packet_f{};
  avcodec_receive_frame_func avcodec_receive_frame_f{};
  av_packet_unref_func av_packet_unref_f{};
  avcodec_flush_buffers_func avcodec_flush_buffers_f{};
  avcodec_close_func avcodec_close_f{};

  rfb::LogWriter vlog{"FFmpeg"};

  DEFINE_GUARD(Frame, AVFrame, av_frame_free)
  DEFINE_GUARD(Packet, AVPacket, av_packet_free)

  AVFormatContext *format_ctx{};
  AVCodecContext *codec_ctx{};
  int video_stream_idx{-1};

  DlHandlerGuard libavformat{};
  DlHandlerGuard libavutil{};
  DlHandlerGuard libswscale{};
  DlHandlerGuard libavcodec{};

public:
  FFmpegFrameFeeder();

  ~FFmpegFrameFeeder();

  void open(std::string_view path);

  [[nodiscard]] int64_t get_total_frame_count() const {
    return format_ctx->streams[video_stream_idx]->nb_frames;
  }

  struct frame_dimensions_t {
    int width{};
    int height{};
  };

  [[nodiscard]] frame_dimensions_t get_frame_dimensions() const {
    return {codec_ctx->width, codec_ctx->height};
  }

  struct play_stats_t {
    uint64_t frames{};
    uint64_t total{};
    std::vector<uint64_t> timings;
  };

  play_stats_t play(benchmarking::MockTestConnection *connection) const;
};
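As a reading aid, one D_LOOKUP_SYM use from the constructor expands roughly as follows (hand-expanded; it relies on the matching *_func alias, the STR() stringizer defined above, and the std::string_literals using-declaration in the enclosing scope):

// Hand-expanded form of: av_frame_alloc_f = D_LOOKUP_SYM(handle, av_frame_alloc);
av_frame_alloc_f = [](auto handle, auto *sym_name) -> auto {
  auto *sym = reinterpret_cast<av_frame_alloc_func>(dlsym(handle, sym_name));
  if (!sym)
    throw std::runtime_error("Failed to load symbol "s + sym_name);   // "s needs std::string_literals
  return sym;
}(handle, "av_frame_alloc");   // STR(av_frame_alloc) stringizes the token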
@@ -184,7 +184,7 @@ static void parseClipTypes()

    vlog.debug("Adding DLP binary mime type %s", m.mime);
  }
  vlog.debug("Total %u binary mime types", dlp_mimetypes.size());
  vlog.debug("Total %lu binary mime types", dlp_mimetypes.size());

  free(origstr);
}