chore: add recording mode for status page generation

Almamu committed on 2025-05-10 05:29:13 +02:00
parent 30d977ae57
commit 010f14cc67
5 changed files with 267 additions and 1 deletion


@@ -12,6 +12,10 @@ set(CMAKE_MODULE_PATH "${CMAKE_CURRENT_SOURCE_DIR}/CMakeModules")
set(OpenGL_GL_PREFERENCE "LEGACY")
set(CMAKE_EXPORT_COMPILE_COMMANDS ON)
if(NOT DEMOMODE)
set(DEMOMODE 0)
endif()
if(NOT ERRORONLY)
set(ERRORONLY 0)
endif()
@@ -20,6 +24,7 @@ set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -Wno-narrowing")
# if you're developing you might find this debug option useful for shader output, although RenderDoc is encouraged
add_compile_definitions(ERRORONLY=${ERRORONLY})
add_compile_definitions(DEMOMODE=${DEMOMODE})
find_package(X11)
find_package(OpenGL REQUIRED)
@@ -207,6 +212,12 @@ if(X11_SUPPORT_FOUND)
endif()
endif()
if(DEMOMODE)
set(DEMOMODE_SOURCES
src/recording.cpp
src/recording.h)
message(WARNING "Enabling demo mode will automatically record 5 seconds and stop the software. This is used internally to produce the video seen on the website as a sort of status report")
endif()
if(NOT WAYLAND_SUPPORT_FOUND AND NOT X11_SUPPORT_FOUND)
message(WARNING "No window server detected at build time. You will only be able to preview backgrounds")
endif()
@@ -500,7 +511,8 @@ add_executable(
src/WallpaperEngine/Core/Objects/Images/Materials/CPass.h
${WAYLAND_SOURCES}
${X11_SOURCES}
${DEMOMODE_SOURCES})
target_link_libraries (linux-wallpaperengine PUBLIC
${OPENGL_LIBRARIES}
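As a usage note (assuming the project's usual out-of-tree CMake workflow), a demo-mode build would be configured with something like cmake -DDEMOMODE=1 followed by a normal build. Because DEMOMODE defaults to 0, regular builds leave the preprocessor flag defined as 0 and do not add src/recording.cpp to the executable's sources, so the five-second recording path is compiled out.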


@@ -272,6 +272,13 @@ CApplicationContext::CApplicationContext (int argc, char* argv []) :
this->state.audio.enabled = this->settings.audio.enabled;
this->state.audio.volume = this->settings.audio.volume;
this->state.mouse.enabled = this->settings.mouse.enabled;
#if DEMOMODE
sLog.error ("WARNING: RUNNING IN DEMO MODE WILL STOP WALLPAPERS AFTER 5 SECONDS SO VIDEO CAN BE RECORDED");
// special settings for demomode
this->settings.screenshot.take = false;
this->settings.render.pauseOnFullscreen = false;
#endif /* DEMOMODE */
}
int CApplicationContext::getArgc () const {


@@ -12,6 +12,10 @@
#include "WallpaperEngine/Core/Wallpapers/CWeb.h"
#include "WallpaperEngine/Render/Drivers/CVideoFactories.h"
#if DEMOMODE
#include "recording.h"
#endif /* DEMOMODE */
#include <unistd.h>
#define STB_IMAGE_WRITE_IMPLEMENTATION
#include <stb_image_write.h>
@@ -391,6 +395,19 @@ void CWallpaperApplication::show () {
std::cout << prettyPrinter.str () << std::endl;
}
#if DEMOMODE
// ensure only one background is running so the output can be properly captured
if (this->m_renderContext->getWallpapers ().size () > 1) {
sLog.exception ("Demo mode only supports one background");
}
int width = this->m_renderContext->getWallpapers ().begin ()->second->getWidth ();
int height = this->m_renderContext->getWallpapers ().begin ()->second->getHeight ();
std::vector<uint8_t> pixels(width * height * 3);
init_encoder ("output.webm", width, height);
int frame = 0;
#endif /* DEMOMODE */
while (this->m_context.state.general.keepRunning) {
// update g_Daytime
time (&seconds);
@@ -412,6 +429,23 @@ void CWallpaperApplication::show () {
sLog.out ("Stop requested by driver");
this->m_context.state.general.keepRunning = false;
}
#if DEMOMODE
// do not record frames unless a second has passed
if (g_Time > 1) {
glBindFramebuffer (GL_FRAMEBUFFER, this->m_renderContext->getWallpapers ().begin ()->second->getWallpaperFramebuffer());
glPixelStorei (GL_PACK_ALIGNMENT, 1);
glReadPixels (0, 0, width, height, GL_RGB, GL_UNSIGNED_BYTE, pixels.data ());
write_video_frame (pixels.data ());
frame ++;
// stop after the given framecount
if (frame >= FRAME_COUNT) {
this->m_context.state.general.keepRunning = false;
}
}
#endif /* DEMOMODE */
// check for fullscreen windows and wait until there's none fullscreen
if (this->m_fullScreenDetector->anythingFullscreen () && this->m_context.state.general.keepRunning) {
m_renderContext->setPause (true);
@@ -429,6 +463,10 @@ void CWallpaperApplication::show () {
sLog.out ("Stopping");
#if DEMOMODE
close_encoder ();
#endif /* DEMOMODE */
SDL_Quit ();
}
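For context on the numbers involved: FRAME_COUNT is 150 and FPS is 30 in recording.cpp below, so the loop records 150 / 30 = 5 seconds of footage once g_Time passes the one-second warm-up. The staging buffer is width * height * 3 bytes because glReadPixels is asked for GL_RGB with GL_UNSIGNED_BYTE (3 bytes per pixel), and GL_PACK_ALIGNMENT is dropped to 1 so rows are tightly packed regardless of whether width * 3 is a multiple of 4.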

src/recording.cpp (new file, 193 lines)

@@ -0,0 +1,193 @@
#if DEMOMODE
// this file is horrible, but doesn't need to be anything good as it's only used internally
#include "recording.h"
#include <cstdio>
#include <cstdlib>
#include <cstring>
#include <ctime>
#include <iostream>
#include <thread>
#include <vector>
extern "C" {
#include <libavcodec/avcodec.h>
#include <libavformat/avformat.h>
#include <libswscale/swscale.h>
#include <libavutil/imgutils.h>
}
const int FPS = 30;
const int FRAME_COUNT = 150;
int WIDTH = 0;
int HEIGHT = 0;
int SOURCE_WIDTH = 0;
int SOURCE_HEIGHT = 0;
int64_t frame_count = 0;
// Global variables to hold encoder context and output stream
const AVCodec *video_codec = nullptr;
AVCodecContext *video_codec_context = nullptr;
AVFormatContext *format_context = nullptr;
AVStream *video_stream = nullptr;
SwsContext *sws_context = nullptr;
AVFrame *video_frame = nullptr;
AVFrame* rgb_frame = nullptr;
int init_encoder(const char *output_file, int sourceWidth, int sourceHeight) {
float factor = 512.0f / (float) sourceWidth;
SOURCE_WIDTH = sourceWidth;
SOURCE_HEIGHT = sourceHeight;
WIDTH = (float) sourceWidth * factor;
HEIGHT = (float) sourceHeight * factor;
avformat_network_init();
// Initialize the output format context
if (avformat_alloc_output_context2(&format_context, nullptr, "webm", output_file) < 0) {
std::cerr << "Error initializing format context" << std::endl;
return -1;
}
// Video codec: VP9
video_codec = avcodec_find_encoder(AV_CODEC_ID_VP9);
if (!video_codec) {
std::cerr << "VP9 codec not found!" << std::endl;
return -1;
}
video_codec_context = avcodec_alloc_context3(video_codec);
if (!video_codec_context) {
std::cerr << "Error allocating video codec context" << std::endl;
return -1;
}
video_codec_context->bit_rate = 4000000;
video_codec_context->width = WIDTH;
video_codec_context->height = HEIGHT;
video_codec_context->pix_fmt = AV_PIX_FMT_YUV420P;
video_codec_context->time_base = (AVRational){1, FPS};
video_codec_context->framerate = (AVRational){FPS, 1};
video_codec_context->gop_size = 12;
video_codec_context->max_b_frames = 1;
video_codec_context->qmin = 10;
video_codec_context->qmax = 40;
if (avcodec_open2(video_codec_context, video_codec, nullptr) < 0) {
std::cerr << "Error opening VP8 codec" << std::endl;
return -1;
}
// Create the video stream in the format context
video_stream = avformat_new_stream(format_context, video_codec);
if (!video_stream) {
std::cerr << "Error creating video stream" << std::endl;
return -1;
}
// Copy codec parameters from the codec context to the stream
if (avcodec_parameters_from_context(video_stream->codecpar, video_codec_context) < 0) {
std::cerr << "Error copying codec parameters to stream" << std::endl;
return -1;
}
video_stream->time_base = video_codec_context->time_base;
// Open output file for writing
if (avio_open(&format_context->pb, output_file, AVIO_FLAG_WRITE) < 0) {
std::cerr << "Error opening output file" << std::endl;
return -1;
}
// Write file header
if (avformat_write_header(format_context, nullptr) < 0) {
std::cerr << "Error writing file header" << std::endl;
return -1;
}
// Allocate video frame
video_frame = av_frame_alloc();
video_frame->format = AV_PIX_FMT_YUV420P;
video_frame->width = WIDTH;
video_frame->height = HEIGHT;
av_frame_get_buffer(video_frame, 0);
rgb_frame = av_frame_alloc();
rgb_frame->format = AV_PIX_FMT_RGB24;
rgb_frame->width = SOURCE_WIDTH;
rgb_frame->height = SOURCE_HEIGHT;
// Set up YUV conversion context (RGB to YUV)
sws_context = sws_getContext(SOURCE_WIDTH, SOURCE_HEIGHT, AV_PIX_FMT_RGB24,
WIDTH, HEIGHT, AV_PIX_FMT_YUV420P,
SWS_BICUBIC, nullptr, nullptr, nullptr);
return 0;
}
int write_video_frame(const uint8_t *rgb_data) {
av_image_fill_arrays(rgb_frame->data, rgb_frame->linesize, rgb_data, AV_PIX_FMT_RGB24, SOURCE_WIDTH, SOURCE_HEIGHT, 1);
sws_scale(sws_context, rgb_frame->data, rgb_frame->linesize, 0, SOURCE_HEIGHT, video_frame->data, video_frame->linesize);
// Send the frame to the encoder
int ret = avcodec_send_frame(video_codec_context, video_frame);
if (ret < 0) {
std::cerr << "Error sending video frame: " << ret << std::endl;
return -1;
}
AVPacket packet;
av_init_packet(&packet);
packet.data = nullptr;
packet.size = 0;
// Receive the encoded packet from the encoder
ret = avcodec_receive_packet(video_codec_context, &packet);
if (ret < 0) {
std::cerr << "Error receiving video packet: " << ret << std::endl;
return -1;
}
packet.stream_index = video_stream->index;
// Set the PTS and DTS values
packet.pts = av_rescale_q(frame_count, video_codec_context->time_base, video_stream->time_base);
packet.dts = packet.pts; // For simplicity, you can set DTS equal to PTS for now
packet.duration = av_rescale_q(1, video_codec_context->time_base, video_stream->time_base);
// Increment frame counter
frame_count++;
// Write the encoded video packet to the file
ret = av_interleaved_write_frame(format_context, &packet);
if (ret < 0) {
std::cerr << "Error writing video packet: " << ret << std::endl;
return -1;
}
// Ensure that the packet is freed
av_packet_unref(&packet);
return 0;
}
int close_encoder() {
// Reset the encoder's internal state (note: this does not drain any buffered frames)
avcodec_flush_buffers(video_codec_context);
// Write the trailer
av_write_trailer(format_context);
// Clean up
avcodec_free_context(&video_codec_context);
avformat_free_context(format_context);
av_frame_free(&video_frame);
sws_freeContext(sws_context);
return 0;
}
#endif /* DEMOMODE */

src/recording.h (new file, 16 lines)

@@ -0,0 +1,16 @@
#pragma once
#if DEMOMODE
#include <cstdint>
#include <cstdlib>
#include <vector>
extern const int FPS;
extern const int FRAME_COUNT;
int init_encoder(const char *output_file, int sourceWidth, int sourceHeight);
int write_video_frame(const uint8_t *rgb_data);
int close_encoder();
#endif /* DEMOMODE */
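
To make the new interface easier to read at a glance, here is a hedged, stand-alone usage sketch (not part of the commit) that drives the three functions recording.h exposes. It assumes a build configured with -DDEMOMODE=1 (otherwise the header declares nothing) and linking against the same FFmpeg libraries recording.cpp uses; in the commit itself the caller is CWallpaperApplication::show (), which fills the buffer from glReadPixels and likewise ignores write_video_frame's return value.

#include "recording.h"

#include <algorithm>
#include <cstdint>
#include <vector>

int main () {
    const int width = 1920;
    const int height = 1080;
    // tightly packed RGB24 staging buffer, matching GL_RGB with GL_PACK_ALIGNMENT set to 1
    std::vector<uint8_t> rgb (width * height * 3);

    if (init_encoder ("output.webm", width, height) < 0)
        return 1;

    for (int frame = 0; frame < FRAME_COUNT; frame ++) {
        // a solid grey frame keeps the sketch short; the real caller reads back the wallpaper framebuffer
        std::fill (rgb.begin (), rgb.end (), static_cast<uint8_t> (0x80));
        // the return value is ignored here, mirroring the caller in CWallpaperApplication::show ()
        write_video_frame (rgb.data ());
    }

    return close_encoder ();
}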