summaryrefslogtreecommitdiff
diff options
context:
space:
mode:
authorpommicket <pommicket@gmail.com>2025-02-23 13:29:17 -0500
committerpommicket <pommicket@gmail.com>2025-02-25 15:16:08 -0500
commit78f28b310251cd3e35d588c9f1476e3d0ef6d983 (patch)
treea077d205c55a8daefc18085ac6c589ce1aacf003
parent31d0e293b384ac89543c309c8c3b300130495c2e (diff)
actual video recording is working!
-rw-r--r--camera.c34
-rw-r--r--camera.h5
-rw-r--r--main.c61
3 files changed, 73 insertions, 27 deletions
diff --git a/camera.c b/camera.c
index 74434c8..b9b2200 100644
--- a/camera.c
+++ b/camera.c
@@ -10,6 +10,7 @@
#include "ds.h"
#include "3rd_party/stb_image_write.h"
#include <jpeglib.h>
+#include <libavcodec/avcodec.h>
#define CAMERA_MAX_BUFFERS 4
struct Camera {
@@ -925,3 +926,36 @@ void camera_hash_str(Camera *camera, char str[HASH_SIZE * 2 + 1]) {
const char *camera_devnode(Camera *camera) {
return camera->devnode;
}
+
+bool camera_copy_to_av_frame(Camera *camera, struct AVFrame *frame_out) {
+ uint8_t *frame_in = camera_curr_frame(camera);
+ int32_t frame_width = camera_frame_width(camera);
+ int32_t frame_height = camera_frame_height(camera);
+ if (!frame_in
+ || frame_width != frame_out->width
+ || frame_height != frame_out->height
+ || camera_pixel_format(camera) != V4L2_PIX_FMT_YUV420
+ || frame_out->format != AV_PIX_FMT_YUV420P) {
+ return false;
+ }
+ // copy Y plane
+ for (int64_t y = 0; y < frame_height; y++) {
+ memcpy(&frame_out->data[0][y * frame_out->linesize[0]],
+ &frame_in[y * frame_width], frame_width);
+ }
+ // copy Cb plane
+ int64_t cb_offset = (int64_t)frame_width * frame_height;
+ for (int64_t y = 0; y < frame_height / 2; y++) {
+ memcpy(&frame_out->data[1][y * frame_out->linesize[1]],
+ &frame_in[cb_offset + y * (frame_width / 2)],
+ frame_width / 2);
+ }
+ // copy Cr plane
+ int64_t cr_offset = cb_offset + (int64_t)frame_width / 2 * frame_height / 2;
+ for (int64_t y = 0; y < frame_height / 2; y++) {
+ memcpy(&frame_out->data[2][y * frame_out->linesize[2]],
+ &frame_in[cr_offset + y * (frame_width / 2)],
+ frame_width / 2);
+ }
+ return true;
+}
diff --git a/camera.h b/camera.h
index 0ea646d..56f30ee 100644
--- a/camera.h
+++ b/camera.h
@@ -6,6 +6,7 @@
#include <stddef.h>
#include <string.h>
#include <GL/glcorearb.h>
+struct AVFrame;
typedef uint32_t PixelFormat;
typedef struct Camera Camera;
@@ -127,6 +128,10 @@ bool camera_open(Camera *camera);
Hash camera_hash(Camera *camera);
void camera_hash_str(Camera *camera, char str[HASH_SIZE * 2 + 1]);
bool camera_set_format(Camera *camera, PictureFormat picfmt, CameraAccessMethod access, bool force);
+/// Copy current frame from camera to AVFrame.
+///
+/// Returns `true` on success. Currently only works if both the camera and the AVFrame are in the YUV420 format.
+bool camera_copy_to_av_frame(Camera *camera, struct AVFrame *frame);
void camera_free(Camera *camera);
#endif
diff --git a/main.c b/main.c
index 72527bd..2a1bb56 100644
--- a/main.c
+++ b/main.c
@@ -88,6 +88,7 @@ typedef struct {
bool quit;
CameraMode mode;
bool recording_video;
+ double video_start_time;
AVFormatContext *avf_context;
AVCodecContext *video_encoder;
AVFrame *video_frame;
@@ -591,7 +592,6 @@ static bool write_frame(State *state, AVCodecContext *encoder, AVStream *stream,
return true;
}
-
static void stop_video(State *state) {
if (state->recording_video) {
state->recording_video = false;
@@ -651,11 +651,13 @@ static bool start_video(State *state, const char *filename) {
return false;
}
state->video_encoder->codec_id = fmt->video_codec;
- // TODO: adjustable video framerate
- state->video_encoder->bit_rate = (int64_t)5 * camera_frame_width(state->camera) * camera_frame_height(state->camera);
+ // TODO: adjustable video quality
+ const int64_t quality = 5;
+ state->video_encoder->bit_rate = quality * camera_frame_width(state->camera) * camera_frame_height(state->camera);
state->video_encoder->width = camera_frame_width(state->camera);
state->video_encoder->height = camera_frame_height(state->camera);
- state->video_encoder->time_base = state->video_stream->time_base = (AVRational){1,30};// TODO: restrict application to 30FPS when recording video
+ // TODO: adjustable video framerate
+ state->video_encoder->time_base = state->video_stream->time_base = (AVRational){1,30};
state->video_encoder->gop_size = 12;
state->video_encoder->pix_fmt = AV_PIX_FMT_YUV420P;
if (state->avf_context->oformat->flags & AVFMT_GLOBALHEADER)
@@ -695,23 +697,7 @@ static bool start_video(State *state, const char *filename) {
return false;
}
state->recording_video = true;
- // ----
- for (int frame = 0; frame < 300; frame++) {
- err = av_frame_make_writable(state->video_frame);
- if (err < 0) {
- fprintf(stderr, "error: av_frame_make_writable: %s\n", av_err2str(err));
- return false;
- }
- for (int y = 0; y < state->video_frame->height; y++) {
- for (int x = 0; x < state->video_frame->width; x++) {
- state->video_frame->data[0][y * state->video_frame->linesize[0] + x] = (uint8_t)(x + y+frame);
- state->video_frame->data[1][(y/2) * state->video_frame->linesize[1] + x/2] = (uint8_t)(x * y);
- state->video_frame->data[2][(y/2) * state->video_frame->linesize[2] + x/2] = (uint8_t)(x - y);
- }
- }
- state->video_frame->pts = state->video_pts++;
- write_frame(state, state->video_encoder, state->video_stream, state->video_frame);
- }
+ state->video_start_time = get_time_double();
return true;
}
@@ -1001,10 +987,10 @@ void main() {\n\
o_color = vec4(mix(color, vec3(1.0), u_flash), opacity);\n\
}\n\
";
- char err[256] = {0};
- GLuint program = gl_compile_and_link_shaders(err, vshader_code, fshader_code);
- if (*err) {
- fatal_error("Couldn't compile shader: %s", err);
+ static char shader_err[256] = {0};
+ GLuint program = gl_compile_and_link_shaders(shader_err, vshader_code, fshader_code);
+ if (*shader_err) {
+ fatal_error("Couldn't compile shader: %s", shader_err);
}
if (program == 0) {
fatal_error("Couldn't compile shader (no error log available)");
@@ -1150,6 +1136,7 @@ void main() {\n\
}
break;
case SDLK_TAB:
+ if (state->recording_video) break;
state->mode = (state->mode + 1) % MODE_COUNT;
switch (state->mode) {
case MODE_PICTURE:
@@ -1416,7 +1403,9 @@ void main() {\n\
const char *text[] = {
"F1 - open this help screen",
"F2 - show debug info",
- "Space - take a picture",
+ state->mode == MODE_VIDEO
+ ? "Space - start/stop recording"
+ : "Space - take a picture",
"Escape - open/close settings",
"Ctrl+f - open picture directory",
"Tab - switch between picture and video",
@@ -1493,6 +1482,23 @@ void main() {\n\
last_camera_time = curr_time;
n_active_textures = camera_update_gl_textures(state->camera, camera_textures);
}
+ if (state->recording_video) {
+ int64_t next_pts = state->video_pts;
+ int64_t curr_pts = (int64_t)((curr_time - state->video_start_time)
+ * state->video_encoder->time_base.den
+ / state->video_encoder->time_base.num);
+ if (curr_pts >= next_pts) {
+ int err = av_frame_make_writable(state->video_frame);
+ if (err < 0) {
+ fprintf(stderr, "error: av_frame_make_writable: %s\n", av_err2str(err));
+ return EXIT_FAILURE;
+ }
+ state->video_frame->pts = curr_pts;
+ camera_copy_to_av_frame(state->camera, state->video_frame);
+ write_frame(state, state->video_encoder, state->video_stream, state->video_frame);
+ state->video_pts = curr_pts + 1;
+ }
+ }
gl.Uniform1i(u_pixel_format, last_frame_pixfmt);
gl.ActiveTexture(GL_TEXTURE0);
// we always want to bind something to every texture slot,
@@ -1586,7 +1592,7 @@ void main() {\n\
SDL_FreeSurface(surf);
}
gl.Uniform2f(u_scale, gl_width, gl_height);
- gl.Uniform2f(u_offset, 1 - gl_width, 1 - gl_height);
+ gl.Uniform2f(u_offset, 0.99f - gl_width, 1 - gl_height);
gl.Uniform1i(u_sampler, 0);
gl.Uniform1f(u_opacity, 1);
gl.Uniform1i(u_pixel_format, V4L2_PIX_FMT_RGBA32);
@@ -1650,6 +1656,7 @@ void main() {\n\
SDL_GL_SwapWindow(window);
}
quit:
+ stop_video(state);
udev_monitor_unref(udev_monitor);
udev_unref(udev);
arr_foreach_ptr(state->cameras, Camera *, pcamera) {