summaryrefslogtreecommitdiff
diff options
context:
space:
mode:
authorpommicket <pommicket@gmail.com>2025-02-25 15:15:10 -0500
committerpommicket <pommicket@gmail.com>2025-02-25 15:16:09 -0500
commitb6656786caa1b5247e251e63d15f3173bcf3d26f (patch)
treea5c3c722dfe535a1372096ec21ed3e5353bce8ab
parent5626363c05bd379047cbe102feaceb18a04a738c (diff)
logging
-rw-r--r--camera.c70
-rw-r--r--log.c14
-rw-r--r--log.h2
-rw-r--r--main.c12
-rw-r--r--video.c68
5 files changed, 96 insertions, 70 deletions
diff --git a/camera.c b/camera.c
index 397bdc1..b344ca2 100644
--- a/camera.c
+++ b/camera.c
@@ -11,7 +11,7 @@
#include "3rd_party/stb_image_write.h"
#include <jpeglib.h>
#include <libavcodec/avcodec.h>
-#include "util.h"
+#include "log.h"
#define CAMERA_MAX_BUFFERS 4
struct Camera {
@@ -168,7 +168,7 @@ static bool camera_setup_with_read(Camera *camera) {
uint32_t image_size = camera->curr_format.fmt.pix.sizeimage;
camera->read_frame = realloc(camera->read_frame, image_size);
if (!camera->read_frame) {
- perror("realloc");
+ log_perror("realloc camera->read_frame to %" PRIu32, image_size);
return false;
}
memset(camera->read_frame, 0, image_size);
@@ -182,7 +182,7 @@ static bool camera_setup_with_mmap(Camera *camera) {
req.type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
req.memory = V4L2_MEMORY_MMAP;
if (v4l2_ioctl(camera->fd, VIDIOC_REQBUFS, &req) != 0) {
- perror("v4l2_ioctl VIDIOC_REQBUFS");
+ log_perror("v4l2_ioctl VIDIOC_REQBUFS \"%s\"", camera->name);
return false;
}
camera->buffer_count = req.count;
@@ -192,7 +192,7 @@ static bool camera_setup_with_mmap(Camera *camera) {
buf.memory = V4L2_MEMORY_MMAP;
buf.index = i;
if (v4l2_ioctl(camera->fd, VIDIOC_QUERYBUF, &buf) != 0) {
- perror("v4l2_ioctl VIDIOC_QUERYBUF");
+ log_perror("v4l2_ioctl VIDIOC_QUERYBUF \"%s\" %d", camera->name, i);
return false;
}
camera->mmap_size[i] = buf.length;
@@ -200,7 +200,7 @@ static bool camera_setup_with_mmap(Camera *camera) {
MAP_SHARED, camera->fd, buf.m.offset);
if (camera->mmap_frames[i] == MAP_FAILED) {
camera->mmap_frames[i] = NULL;
- perror("mmap");
+ log_perror("mmap");
return false;
}
}
@@ -210,14 +210,14 @@ static bool camera_setup_with_mmap(Camera *camera) {
buf.memory = V4L2_MEMORY_MMAP;
buf.index = i;
if (v4l2_ioctl(camera->fd, VIDIOC_QBUF, &buf) != 0) {
- perror("v4l2_ioctl VIDIOC_QBUF");
+ log_perror("v4l2_ioctl VIDIOC_QBUF \"%s\" %d", camera->name, i);
return false;
}
}
if (v4l2_ioctl(camera->fd,
VIDIOC_STREAMON,
(enum v4l2_buf_type[1]) { V4L2_BUF_TYPE_VIDEO_CAPTURE }) != 0) {
- perror("v4l2_ioctl VIDIOC_STREAMON");
+ log_perror("v4l2_ioctl VIDIOC_STREAMON \"%s\"", camera->name);
return false;
}
return true;
@@ -287,7 +287,7 @@ TODO: test me with a camera that supports userptr i/o
req.type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
req.memory = V4L2_MEMORY_USERPTR;
if (v4l2_ioctl(camera->fd, VIDIOC_REQBUFS, &req) != 0) {
- perror("v4l2_ioctl VIDIOC_REQBUFS");
+ log_perror("v4l2_ioctl VIDIOC_REQBUFS");
return false;
}
for (int i = 0; i < CAMERA_MAX_BUFFERS; i++) {
@@ -299,13 +299,13 @@ TODO: test me with a camera that supports userptr i/o
buf.m.userptr = (unsigned long)camera->userp_frames[i];
buf.length = camera->curr_format.fmt.pix.sizeimage;
if (v4l2_ioctl(camera->fd, VIDIOC_QBUF, &buf) != 0) {
- perror("v4l2_ioctl VIDIOC_QBUF");
+ log_perror("v4l2_ioctl VIDIOC_QBUF");
}
}
if (v4l2_ioctl(camera->fd,
VIDIOC_STREAMON,
(enum v4l2_buf_type[1]) { V4L2_BUF_TYPE_VIDEO_CAPTURE }) != 0) {
- perror("v4l2_ioctl VIDIOC_STREAMON");
+ log_perror("v4l2_ioctl VIDIOC_STREAMON");
return false;
}
return true;*/
@@ -316,7 +316,7 @@ static bool camera_stop_io(Camera *camera) {
camera->any_frames = false;
if (v4l2_ioctl(camera->fd, VIDIOC_STREAMOFF,
(enum v4l2_buf_type[1]) { V4L2_BUF_TYPE_VIDEO_CAPTURE }) != 0) {
- perror("v4l2_ioctl VIDIOC_STREAMOFF");
+ log_perror("v4l2_ioctl VIDIOC_STREAMOFF \"%s\"", camera->name);
}
camera->streaming = false;
// Just doing VIDIOC_STREAMOFF doesn't seem to be enough to prevent EBUSY.
@@ -324,11 +324,11 @@ static bool camera_stop_io(Camera *camera) {
v4l2_close(camera->fd);
camera->fd = v4l2_open(camera->devnode, O_RDWR);
if (camera->fd < 0) {
- perror("v4l2_open");
+ log_perror("v4l2_open \"%s\"", camera->devnode);
return false;
}
if (v4l2_ioctl(camera->fd, VIDIOC_S_INPUT, &camera->input_idx) != 0) {
- perror("v4l2_ioctl");
+ log_perror("v4l2_ioctl VIDIOC_S_INPUT \"%s\" %" PRIu32, camera->name, camera->input_idx);
camera_close(camera);
return false;
}
@@ -497,7 +497,7 @@ bool camera_save_jpg(Camera *camera, const char *name, int quality) {
// frame is already in jpeg format
FILE *fp = fopen(name, "wb");
if (!fp) {
- perror("fopen");
+ log_perror("fopen \"%s\"", name);
return false;
}
fwrite(camera_curr_frame(camera), 1, camera->frame_bytes_set, fp);
@@ -564,10 +564,9 @@ bool camera_next_frame(Camera *camera) {
buf.type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
buf.memory = memory;
if (v4l2_ioctl(camera->fd, VIDIOC_DQBUF, &buf) != 0) {
- static bool printed_error;
- if (!printed_error) {
- perror("v4l2_ioctl VIDIOC_DQBUF");
- printed_error = true;
+ static atomic_flag printed_error = ATOMIC_FLAG_INIT;
+ if (!atomic_flag_test_and_set(&printed_error)) {
+ log_perror("v4l2_ioctl VIDIOC_DQBUF \"%s\"", camera->name);
}
return false;
}
@@ -601,17 +600,19 @@ bool camera_next_frame(Camera *camera) {
if (!messenger.error)
jpeg_start_decompress(&cinfo);
if (!messenger.error && cinfo.output_components != 3) {
- fprintf(stderr, "JPEG has %d components, instead of 3. That's messed up.\n",
+ log_error("JPEG has %d components, instead of 3. That's messed up.",
cinfo.output_components);
messenger.error = true;
}
if (!messenger.error && (int32_t)cinfo.output_width != frame_width) {
- fprintf(stderr, "JPEG from camera has width %" PRId32 ", but I was expecting %" PRId32 "\n",
+ log_error("JPEG from camera has width %" PRId32 ", but I was expecting %" PRId32,
(int32_t)cinfo.output_width, frame_width);
+ messenger.error = true;
}
if (!messenger.error && (int32_t)cinfo.output_height != frame_height) {
- fprintf(stderr, "JPEG from camera has height %" PRId32 ", but I was expecting %" PRId32 "\n",
+ log_error("JPEG from camera has height %" PRId32 ", but I was expecting %" PRId32,
(int32_t)cinfo.output_height, frame_height);
+ messenger.error = true;
}
if (!messenger.error) {
for (int32_t y = 0; y < frame_height; y++) {
@@ -752,7 +753,7 @@ void camera_close(Camera *camera) {
if (camera->streaming) {
if (v4l2_ioctl(camera->fd, VIDIOC_STREAMOFF,
(enum v4l2_buf_type[1]) { V4L2_BUF_TYPE_VIDEO_CAPTURE }) != 0) {
- perror("v4l2_ioctl VIDIOC_STREAMOFF");
+ log_perror("v4l2_ioctl VIDIOC_STREAMOFF \"%s\"", camera->name);
}
camera->streaming = false;
}
@@ -839,20 +840,23 @@ bool camera_set_format(Camera *camera, PictureFormat picfmt, int desired_framera
format.fmt.pix.pixelformat = pixfmt;
format.fmt.pix.width = picfmt.width;
format.fmt.pix.height = picfmt.height;
- camera->decompression_buf = realloc(camera->decompression_buf, (size_t)3 * picfmt.width * picfmt.height);
+ const size_t decompression_buf_size = (size_t)3 * picfmt.width * picfmt.height;
+ camera->decompression_buf = realloc(camera->decompression_buf, decompression_buf_size);
if (!camera->decompression_buf) {
- perror("realloc");
+ log_perror("realloc camera->decompression_buf to %zu", decompression_buf_size);
camera_close(camera);
return false;
}
if (v4l2_ioctl(camera->fd, VIDIOC_S_FMT, &format) != 0) {
- perror("v4l2_ioctl VIDIOC_S_FMT");
+ log_perror("v4l2_ioctl VIDIOC_S_FMT \"%s\" %dx%d %.4s", camera->name,
+ format.fmt.pix.width, format.fmt.pix.height,
+ (const char *)(const uint32_t[1]){format.fmt.pix.pixelformat});
camera_close(camera);
return false;
}
camera->curr_format = format;
- uint64_t framerates_supported = camera_framerates_supported(camera);
+ const uint64_t framerates_supported = camera_framerates_supported(camera);
int framerate = 30;
if (desired_framerate) {
// select closest framerate to desired
@@ -876,7 +880,7 @@ bool camera_set_format(Camera *camera, PictureFormat picfmt, int desired_framera
.parm.capture = {.readbuffers = 4, .timeperframe = {1, (uint32_t)framerate}},
};
if (v4l2_ioctl(camera->fd, VIDIOC_S_PARM, &stream_params) != 0) {
- perror("v4l2_ioctl VIDIOC_S_PARM");
+ log_perror("v4l2_ioctl VIDIOC_S_PARM \"%s\" framerate=%d", camera->name, framerate);
// NOTE: even if we don't get the framerate we want, don't fail, but do ensure our reported framerate is correct
v4l2_ioctl(camera->fd, VIDIOC_G_PARM, &stream_params);
}
@@ -917,12 +921,12 @@ bool camera_open(Camera *camera, PictureFormat desired_format, int desired_frame
assert(!camera->userp_frames[0]);
camera->fd = v4l2_open(camera->devnode, O_RDWR | O_CLOEXEC);
if (camera->fd < 0) {
- perror("v4l2_open");
+ log_perror("v4l2_open \"%s\"", camera->devnode);
camera_close(camera);
return false;
}
if (v4l2_ioctl(camera->fd, VIDIOC_S_INPUT, &camera->input_idx) != 0) {
- perror("v4l2_ioctl");
+ log_perror("v4l2_ioctl VIDIOC_S_INPUT \"%s\" %d", camera->name, camera->input_idx);
camera_close(camera);
return false;
}
@@ -942,7 +946,7 @@ static void cameras_from_device_with_fd(const char *dev_path, const char *serial
if (input.type != V4L2_INPUT_TYPE_CAMERA) continue;
Camera *camera = calloc(1, sizeof *camera);
if (!camera) {
- perror("calloc");
+ log_perror("calloc camera (size = %zu)", sizeof *camera);
return;
}
camera->fd = -1;
@@ -985,7 +989,7 @@ static void cameras_from_device_with_fd(const char *dev_path, const char *serial
for (int i = 0; ; i++) {
struct v4l2_frmivalenum ival = {.index = i, .pixel_format = fmtdesc.pixelformat, .width = frame_width, .height = frame_height};
if (v4l2_ioctl(fd, VIDIOC_ENUM_FRAMEINTERVALS, &ival) != 0) {
- if (errno != EINVAL) perror("v4l2_ioctl");
+ if (errno != EINVAL) log_perror("v4l2_ioctl VIDIOC_ENUM_FRAMEINTERVALS");
break;
}
// does anyone actually use continuous/stepwise? probably not.
@@ -1056,7 +1060,7 @@ static void cameras_from_device_with_fd(const char *dev_path, const char *serial
void cameras_from_device(const char *dev_path, const char *serial, Camera ***cameras) {
int fd = v4l2_open(dev_path, O_RDWR | O_CLOEXEC);
if (fd < 0) {
- perror("v4l2_open");
+ log_perror("v4l2_open \"%s\"", dev_path);
return;
}
cameras_from_device_with_fd(dev_path, serial, fd, cameras);
@@ -1092,7 +1096,7 @@ bool camera_copy_to_av_frame(Camera *camera, struct AVFrame *frame_out) {
|| frame_out->format != AV_PIX_FMT_YUV420P) {
static atomic_flag warned = ATOMIC_FLAG_INIT;
if (!atomic_flag_test_and_set_explicit(&warned, memory_order_relaxed)) {
- fprintf(stderr, "%s: Bad picture format.\n", __func__);
+ log_error("%s: Bad picture format.", __func__);
}
return false;
}
diff --git a/log.c b/log.c
index a4e585d..2e0aecb 100644
--- a/log.c
+++ b/log.c
@@ -1,6 +1,7 @@
#include "log.h"
#include <stdbool.h>
#include <unistd.h>
+#include <errno.h>
static FILE *log_file = NULL;
@@ -59,6 +60,19 @@ void log_error(const char *fmt, ...) {
va_end(args);
}
+void log_perror(const char *fmt, ...) {
+ int err = errno;
+ va_list args;
+ va_start(args, fmt);
+ char *prefix = va_sprintf(fmt, args);
+ va_end(args);
+ char error[64];
+ *error = '\0';
+ strerror_r(err, error, sizeof error);
+ log_error("%s: %s", prefix, error);
+ free(prefix);
+}
+
void log_warning(const char *fmt, ...) {
va_list args;
va_start(args, fmt);
diff --git a/log.h b/log.h
index b739837..ee638de 100644
--- a/log.h
+++ b/log.h
@@ -12,5 +12,7 @@ void log_init(const char *out);
void log_message(int severity, const char *fmt, va_list args);
void log_error(PRINTF_FORMAT_STRING const char *fmt, ...) ATTRIBUTE_PRINTF(1, 2);
void log_warning(PRINTF_FORMAT_STRING const char *fmt, ...) ATTRIBUTE_PRINTF(1, 2);
+/// Like `perror`, but outputs to log and allows format string.
+void log_perror(PRINTF_FORMAT_STRING const char *fmt, ...) ATTRIBUTE_PRINTF(1, 2);
#endif
diff --git a/main.c b/main.c
index d80844b..7ab63c5 100644
--- a/main.c
+++ b/main.c
@@ -563,7 +563,7 @@ static bool mkdir_with_parents(const char *path) {
return mkdir_with_parents(path);
}
if (errno != ENOENT) {
- perror("mkdir");
+ log_perror("mkdir");
free(buf);
return false;
}
@@ -667,7 +667,7 @@ static bool take_picture(State *state) {
if (fd == -1 && errno == EEXIST) {
continue;
} else if (fd == -1) {
- perror("can't write to picture directory");
+ log_perror("can't write to picture directory");
return false;
} else {
close(fd);
@@ -1039,7 +1039,7 @@ void main() {\n\
// subsystems don't seem to be set for "remove" events, so we shouldn't do this:
// udev_monitor_filter_add_match_subsystem_devtype(udev_monitor, "video4linux", NULL);
if (!udev_monitor) {
- perror("udev_monitor_new_from_netlink");
+ log_perror("udev_monitor_new_from_netlink");
}
if (udev_monitor) {
// set udev monitor to nonblocking
@@ -1047,7 +1047,7 @@ void main() {\n\
int flags = fcntl(fd, F_GETFL);
flags |= O_NONBLOCK | O_CLOEXEC;
if (fcntl(fd, F_SETFL, flags) != 0) {
- perror("fcntl");
+ log_perror("fcntl");
}
// enable monitor
udev_monitor_enable_receiving(udev_monitor);
@@ -1547,6 +1547,10 @@ void main() {\n\
} else {
gl.ActiveTexture(GL_TEXTURE0);
gl.BindTexture(GL_TEXTURE_2D, no_camera_texture);
+ gl.ActiveTexture(GL_TEXTURE1);
+ gl.BindTexture(GL_TEXTURE_2D, black_texture);
+ gl.ActiveTexture(GL_TEXTURE2);
+ gl.BindTexture(GL_TEXTURE_2D, black_texture);
gl.Uniform1i(u_pixel_format, V4L2_PIX_FMT_RGB24);
}
double timer_time_left = settings->timer - (curr_time - state->timer_activate_time);
diff --git a/video.c b/video.c
index b04136e..16f7fa3 100644
--- a/video.c
+++ b/video.c
@@ -8,7 +8,7 @@
#include <pulse/simple.h>
#include <unistd.h>
-#include "util.h"
+#include "log.h"
#include "camera.h"
// no real harm in making this bigger, other than increased memory usage.
@@ -58,7 +58,7 @@ static int audio_thread(void *data) {
pa_simple *pulseaudio = pa_simple_new(NULL, "camlet", PA_STREAM_RECORD, NULL,
"microphone", &audio_format, NULL, &buffer_attr, &err);
if (!pulseaudio) {
- fprintf(stderr, "couldn't connect to pulseaudio: %s", pa_strerror(err));
+ log_error("couldn't connect to pulseaudio: %s", pa_strerror(err));
return -1;
}
uint32_t warned[2] = {0};
@@ -84,12 +84,11 @@ static int audio_thread(void *data) {
}
if ((tail - head + AUDIO_QUEUE_SIZE) % AUDIO_QUEUE_SIZE > AUDIO_QUEUE_SIZE * 3 / 4) {
if (warned[0] < 10) {
- fprintf(stderr, "\x1b[93mwarning:\x1b[0m audio overrun\n");
+ log_warning("audio overrun");
warned[0]++;
}
} else if (result >= 0) {
const uint32_t nfloats = sizeof buf / sizeof(float);
- printf("capture: %u \n",nfloats);
if (tail + nfloats <= AUDIO_QUEUE_SIZE) {
// easy case
memcpy(&ctx->audio_queue[tail], buf, sizeof buf);
@@ -102,7 +101,7 @@ static int audio_thread(void *data) {
}
} else {
if (!warned[1]) {
- fprintf(stderr, "pa_simple_read: %s", pa_strerror(err));
+ log_error("pa_simple_read: %s", pa_strerror(err));
warned[1]++;
}
}
@@ -121,7 +120,7 @@ VideoContext *video_init(void) {
if (thrd_create(&ctx->audio_thread, audio_thread, ctx) == thrd_success) {
ctx->audio_thread_created = true;
} else {
- perror("couldn't create audio thread");
+ log_perror("couldn't create audio thread");
}
return ctx;
}
@@ -139,25 +138,29 @@ bool video_start(VideoContext *ctx, const char *filename, int32_t width, int32_t
}
int err = avformat_alloc_output_context2(&ctx->avf_context, NULL, NULL, filename);
if (!ctx->avf_context) {
- fprintf(stderr, "error: avformat_alloc_output_context2: %s\n", av_err2str(err));
+ log_error("avformat_alloc_output_context2 \"%s\": %s", filename, av_err2str(err));
return false;
}
const AVOutputFormat *fmt = ctx->avf_context->oformat;
const AVCodec *video_codec = avcodec_find_encoder(fmt->video_codec);
if (!video_codec) {
- fprintf(stderr, "couldn't find encoder for codec %s\n", avcodec_get_name(fmt->video_codec));
+ log_error("couldn't find encoder for video codec %s", avcodec_get_name(fmt->video_codec));
return false;
}
ctx->video_stream = avformat_new_stream(ctx->avf_context, NULL);
+ if (!ctx->video_stream) {
+ log_error("avformat_new_stream (video): %s", av_err2str(err));
+ return false;
+ }
ctx->video_stream->id = 0;
ctx->video_encoder = avcodec_alloc_context3(video_codec);
if (!ctx->video_encoder) {
- fprintf(stderr, "couldn't create video encoding context\n");
+ log_error("couldn't create video encoding context for codec %s", avcodec_get_name(fmt->video_codec));
return false;
}
ctx->av_packet = av_packet_alloc();
if (!ctx->av_packet) {
- fprintf(stderr, "couldn't allocate video packet\n");
+ log_error("couldn't allocate video packet");
return false;
}
ctx->video_encoder->codec_id = fmt->video_codec;
@@ -171,17 +174,17 @@ bool video_start(VideoContext *ctx, const char *filename, int32_t width, int32_t
ctx->video_encoder->flags |= AV_CODEC_FLAG_GLOBAL_HEADER;
err = avcodec_open2(ctx->video_encoder, video_codec, NULL);
if (err < 0) {
- fprintf(stderr, "error: avcodec_open2: %s\n", av_err2str(err));
+ log_error("avcodec_open2 for video codec %s: %s", avcodec_get_name(fmt->video_codec), av_err2str(err));
return false;
}
err = avcodec_parameters_from_context(ctx->video_stream->codecpar, ctx->video_encoder);
if (err < 0) {
- fprintf(stderr, "error: avcodec_parameters_from_context: %s\n", av_err2str(err));
+ log_error("avcodec_parameters_from_context for video codec %s: %s", avcodec_get_name(fmt->video_codec), av_err2str(err));
return false;
}
ctx->video_frame = av_frame_alloc();
if (!ctx->video_frame) {
- fprintf(stderr, "couldn't allocate video frame\n");
+ log_error("couldn't allocate video frame");
return false;
}
ctx->video_frame->format = AV_PIX_FMT_YUV420P;
@@ -189,22 +192,22 @@ bool video_start(VideoContext *ctx, const char *filename, int32_t width, int32_t
ctx->video_frame->height = ctx->video_encoder->height;
err = av_frame_get_buffer(ctx->video_frame, 0);
if (err < 0) {
- fprintf(stderr, "error: av_frame_get_buffer: %s\n", av_err2str(err));
+ log_error("av_frame_get_buffer for video: %s", av_err2str(err));
return false;
}
err = avio_open(&ctx->avf_context->pb, filename, AVIO_FLAG_WRITE);
if (err < 0) {
- fprintf(stderr, "error: avio_open: %s\n", av_err2str(err));
+ log_error("avio_open \"%s\": %s", filename, av_err2str(err));
return false;
}
const AVCodec *audio_codec = avcodec_find_encoder(fmt->audio_codec);
if (!audio_codec) {
- fprintf(stderr, "error: avcodec_find_encoder: %s\n", av_err2str(err));
+ log_error("avcodec_find_encoder for audio codec %s: %s", avcodec_get_name(fmt->audio_codec), av_err2str(err));
goto no_audio;
}
ctx->audio_encoder = avcodec_alloc_context3(audio_codec);
if (!ctx->audio_encoder) {
- fprintf(stderr, "error: avcodec_alloc_context3: %s\n", av_err2str(err));
+ log_error("avcodec_alloc_context3 for audio codec %s: %s", avcodec_get_name(fmt->audio_codec), av_err2str(err));
goto no_audio;
}
// only FLTP is supported by AAC encoder
@@ -217,7 +220,7 @@ bool video_start(VideoContext *ctx, const char *filename, int32_t width, int32_t
ctx->audio_encoder->flags |= AV_CODEC_FLAG_GLOBAL_HEADER;
err = avcodec_open2(ctx->audio_encoder, audio_codec, NULL);
if (err < 0) {
- fprintf(stderr, "error: couldn't set audio encoder codec (avcodec_open2): %s\n", av_err2str(err));
+ log_error("avcodec_open2 for audio codec %s: %s", avcodec_get_name(fmt->audio_codec), av_err2str(err));
goto no_audio;
}
ctx->audio_frame_samples = audio_codec->capabilities & AV_CODEC_CAP_VARIABLE_FRAME_SIZE
@@ -225,7 +228,7 @@ bool video_start(VideoContext *ctx, const char *filename, int32_t width, int32_t
: ctx->audio_encoder->frame_size;
ctx->audio_frame = av_frame_alloc();
if (!ctx->audio_frame) {
- fprintf(stderr, "error: couldn't allocate audio frame\n");
+ log_error("couldn't allocate audio frame");
goto no_audio;
}
ctx->audio_frame->format = AV_SAMPLE_FMT_FLTP;
@@ -234,27 +237,27 @@ bool video_start(VideoContext *ctx, const char *filename, int32_t width, int32_t
ctx->audio_frame->nb_samples = ctx->audio_frame_samples;
err = av_frame_get_buffer(ctx->audio_frame, 0);
if (err < 0) {
- fprintf(stderr, "error: av_frame_get_buffer (audio): %s\n", av_err2str(err));
+ log_error("av_frame_get_buffer (audio): %s", av_err2str(err));
goto no_audio;
}
// create stream last so that if stuff above fails we don't have a broken stream in the avformat context
ctx->audio_stream = avformat_new_stream(ctx->avf_context, audio_codec);
if (!ctx->audio_stream) {
- fprintf(stderr, "error: avformat_new_stream (audio): %s\n", av_err2str(err));
+ log_error("avformat_new_stream (audio): %s", av_err2str(err));
goto no_audio;
}
ctx->audio_stream->id = 1;
ctx->audio_stream->time_base = (AVRational){1, 44100};
err = avcodec_parameters_from_context(ctx->audio_stream->codecpar, ctx->audio_encoder);
if (err < 0) {
- fprintf(stderr, "error: avcodec_parameters_from_context (audio): %s\n", av_err2str(err));
+ log_error("avcodec_parameters_from_context (audio): %s", av_err2str(err));
goto no_audio;
}
no_audio:
err = avformat_write_header(ctx->avf_context, NULL);
if (err < 0) {
- fprintf(stderr, "error: avformat_write_header: %s\n", av_err2str(err));
+ log_error("avformat_write_header: %s", av_err2str(err));
return false;
}
atomic_store(&ctx->audio_head, 0);
@@ -269,7 +272,7 @@ no_audio:
static bool write_frame(VideoContext *ctx, AVCodecContext *encoder, AVStream *stream, AVFrame *frame) {
int err = avcodec_send_frame(encoder, frame);
if (err < 0) {
- fprintf(stderr, "error: avcodec_send_frame: %s\n", av_err2str(err));
+ log_error("avcodec_send_frame (stream %d): %s", stream->index, av_err2str(err));
return false;
}
while (true) {
@@ -278,14 +281,14 @@ static bool write_frame(VideoContext *ctx, AVCodecContext *encoder, AVStream *st
break;
}
if (err < 0) {
- fprintf(stderr, "error: avcodec_receive_packet: %s\n", av_err2str(err));
+ log_error("avcodec_receive_packet (stream %d): %s", stream->index, av_err2str(err));
return false;
}
ctx->av_packet->stream_index = stream->index;
av_packet_rescale_ts(ctx->av_packet, encoder->time_base, stream->time_base);
err = av_interleaved_write_frame(ctx->avf_context, ctx->av_packet);
if (err < 0) {
- fprintf(stderr, "error: av_interleaved_write_frame: %s\n", av_err2str(err));
+ log_error("av_interleaved_write_frame (stream %d): %s", stream->index, av_err2str(err));
return false;
}
}
@@ -301,11 +304,10 @@ bool video_submit_frame(VideoContext *ctx, Camera *camera) {
// only this thread writes to head, so relaxed is fine.
uint32_t head = atomic_load_explicit(&ctx->audio_head, memory_order_relaxed);
uint32_t tail = atomic_load(&ctx->audio_tail);
- printf("start recv: head=%u tail=%u\n",head,tail);
while (true) {
int err = av_frame_make_writable(ctx->audio_frame);
if (err < 0) {
- fprintf(stderr, "error: av_frame_make_writable: %s\n", av_err2str(err));
+ log_error("av_frame_make_writable (audio): %s", av_err2str(err));
break;
}
ctx->audio_frame->pts = ctx->next_audio_pts;
@@ -338,10 +340,8 @@ bool video_submit_frame(VideoContext *ctx, Camera *camera) {
}
if (frame_ready) {
ctx->next_audio_pts += ctx->audio_frame_samples;
- printf("recvd: %u\n",nfloats);
write_frame(ctx, ctx->audio_encoder, ctx->audio_stream, ctx->audio_frame);
} else {
- printf("end recv\n");
break;
}
}
@@ -354,7 +354,7 @@ bool video_submit_frame(VideoContext *ctx, Camera *camera) {
if (video_pts >= ctx->next_video_pts) {
int err = av_frame_make_writable(ctx->video_frame);
if (err < 0) {
- fprintf(stderr, "error: av_frame_make_writable: %s\n", av_err2str(err));
+ log_error("av_frame_make_writable (video): %s", av_err2str(err));
return false;
}
ctx->video_frame->pts = video_pts;
@@ -381,7 +381,7 @@ void video_stop(VideoContext *ctx) {
write_frame(ctx, ctx->audio_encoder, ctx->audio_stream, NULL);
int err = av_write_trailer(ctx->avf_context);
if (err < 0) {
- fprintf(stderr, "error: av_write_trailer: %s\n", av_err2str(err));
+ log_error("av_write_trailer: %s", av_err2str(err));
}
avio_closep(&ctx->avf_context->pb);
}
@@ -409,7 +409,9 @@ void video_quit(VideoContext *ctx) {
video_stop(ctx);
if (ctx->audio_thread_created) {
atomic_store(&ctx->audio_head, AUDIO_QUIT);
- thrd_join(ctx->audio_thread, NULL);
+ if (thrd_join(ctx->audio_thread, NULL) != thrd_success) {
+ log_error("thrd_join failed");
+ }
}
free(ctx);
}