author     pommicket <pommicket@gmail.com>    2025-02-20 13:22:22 -0500
committer  pommicket <pommicket@gmail.com>    2025-02-20 13:22:22 -0500
commit     6e46884abd0996b46c61953dd0bcbc5466db427e
tree       021313d1bd77f89aa06021e9fc86915332325186
parent     d71ba7564afef063db8ed9ff1011415d96c921fe
YUV420
-rw-r--r--  camera.c | 80
-rw-r--r--  main.c   | 16
2 files changed, 90 insertions, 6 deletions
diff --git a/camera.c b/camera.c
--- a/camera.c
+++ b/camera.c
@@ -145,6 +145,8 @@ bool pix_fmt_supported(uint32_t pixfmt) {
 	case V4L2_PIX_FMT_YUYV:
 	case V4L2_PIX_FMT_GREY:
 	case V4L2_PIX_FMT_MJPEG:
+	case V4L2_PIX_FMT_YUV420:
+	case V4L2_PIX_FMT_YVU420:
 		return true;
 	}
 	return false;
@@ -345,10 +347,10 @@ static uint8_t *curr_frame_rgb24(Camera *camera) {
 	}
 	int32_t frame_width = camera_frame_width(camera);
 	int32_t frame_height = camera_frame_height(camera);
+	const uint8_t *in = curr_frame, *in_y = NULL, *in_cb = NULL, *in_cr = NULL;
+	uint8_t *out = camera->decompression_buf;
 	switch (camera_pixel_format(camera)) {
 	case V4L2_PIX_FMT_BGR24: {
-		const uint8_t *in = curr_frame;
-		uint8_t *out = camera->decompression_buf;
 		for (int32_t y = 0; y < frame_height; y++) {
 			for (int32_t x = 0; x < frame_width; x++) {
 				*out++ = in[2];
@@ -359,9 +361,18 @@ static uint8_t *curr_frame_rgb24(Camera *camera) {
 		}
 		return camera->decompression_buf;
 	} break;
+	case V4L2_PIX_FMT_GREY: {
+		for (int32_t y = 0; y < frame_height; y++) {
+			for (int32_t x = 0; x < frame_width; x++) {
+				uint8_t b = *in++;
+				*out++ = b;
+				*out++ = b;
+				*out++ = b;
+			}
+		}
+		return camera->decompression_buf;
+	} break;
 	case V4L2_PIX_FMT_YUYV: {
-		const uint8_t *in = curr_frame;
-		uint8_t *out = camera->decompression_buf;
 		for (int32_t y = 0; y < frame_height; y++) {
 			for (int32_t x = 0; x < frame_width / 2; x++) {
 				float y0 = (float)(*in++) * (1.0f / 255.0f);
@@ -381,6 +392,39 @@ static uint8_t *curr_frame_rgb24(Camera *camera) {
 		}
 		return camera->decompression_buf;
 	}
+	case V4L2_PIX_FMT_YUV420:
+		in_y = curr_frame;
+		in_cb = curr_frame + (size_t)frame_width * (size_t)frame_height;
+		in_cr = curr_frame + (size_t)frame_width * (size_t)frame_height * 5 / 4;
+		goto yuv420_planar;
+	case V4L2_PIX_FMT_YVU420:
+		in_y = curr_frame;
+		in_cr = curr_frame + (size_t)frame_width * (size_t)frame_height;
+		in_cb = curr_frame + (size_t)frame_width * (size_t)frame_height * 5 / 4;
+		goto yuv420_planar;
+	yuv420_planar:
+		for (int32_t row = 0; row < frame_height; row++) {
+			for (int32_t col = 0; col < frame_width; col++) {
+				float y = (float)(*in_y++) * (1.0f / 255.0f);
+				float cb = (float)(*in_cb) * (1.0f / 255.0f);
+				float cr = (float)(*in_cr) * (1.0f / 255.0f);
+				if (col % 2 == 1) {
+					in_cb++;
+					in_cr++;
+				}
+				float rgb[3];
+				ycbcr_ITU_R_601_to_rgb(y, cb, cr, rgb);
+				*out++ = (uint8_t)roundf(rgb[0] * 255);
+				*out++ = (uint8_t)roundf(rgb[1] * 255);
+				*out++ = (uint8_t)roundf(rgb[2] * 255);
+			}
+			if (row % 2 == 0) {
+				// go back to start of cb, cr row
+				in_cb -= frame_width / 2;
+				in_cr -= frame_width / 2;
+			}
+		}
+		return camera->decompression_buf;
 	}
 	assert(false);
 	return NULL;
@@ -552,6 +596,29 @@ void camera_update_gl_textures(Camera *camera, const GLuint textures[3]) {
 		if (camera->frame_bytes_set >= (size_t)frame_width * (size_t)frame_height * 2)
 			gl.TexImage2D(GL_TEXTURE_2D, 0, GL_RGBA, frame_width / 2, frame_height, 0, GL_RGBA, GL_UNSIGNED_BYTE, curr_frame);
 		break;
+	case V4L2_PIX_FMT_YUV420:
+		if (camera->frame_bytes_set >= (size_t)frame_width * (size_t)frame_height * 3 / 2) {
+			gl.TexImage2D(GL_TEXTURE_2D, 0, GL_RED, frame_width, frame_height, 0, GL_RED, GL_UNSIGNED_BYTE, curr_frame);
+			gl.BindTexture(GL_TEXTURE_2D, textures[1]);
+			gl.TexImage2D(GL_TEXTURE_2D, 0, GL_RED, frame_width / 2, frame_height / 2, 0, GL_RED, GL_UNSIGNED_BYTE,
+				curr_frame + (size_t)frame_width * (size_t)frame_height);
+			gl.BindTexture(GL_TEXTURE_2D, textures[2]);
+			gl.TexImage2D(GL_TEXTURE_2D, 0, GL_RED, frame_width / 2, frame_height / 2, 0, GL_RED, GL_UNSIGNED_BYTE,
+				curr_frame + (size_t)frame_width * (size_t)frame_height * 5 / 4);
+		}
+		break;
+	case V4L2_PIX_FMT_YVU420:
+		if (camera->frame_bytes_set >= (size_t)frame_width * (size_t)frame_height * 3 / 2) {
+			// same as above, but swap textures[1] and textures[2] so that we only have to handle one case in the shader
+			gl.TexImage2D(GL_TEXTURE_2D, 0, GL_RED, frame_width, frame_height, 0, GL_RED, GL_UNSIGNED_BYTE, curr_frame);
+			gl.BindTexture(GL_TEXTURE_2D, textures[2]);
+			gl.TexImage2D(GL_TEXTURE_2D, 0, GL_RED, frame_width / 2, frame_height / 2, 0, GL_RED, GL_UNSIGNED_BYTE,
+				curr_frame + (size_t)frame_width * (size_t)frame_height);
+			gl.BindTexture(GL_TEXTURE_2D, textures[1]);
+			gl.TexImage2D(GL_TEXTURE_2D, 0, GL_RED, frame_width / 2, frame_height / 2, 0, GL_RED, GL_UNSIGNED_BYTE,
+				curr_frame + (size_t)frame_width * (size_t)frame_height * 5 / 4);
+		}
+		break;
 	case V4L2_PIX_FMT_MJPEG: {
 		// "motion jpeg" is actually just a series of jpegs
 		gl.TexImage2D(GL_TEXTURE_2D, 0, GL_RGB, frame_width, frame_height, 0, GL_RGB, GL_UNSIGNED_BYTE,
@@ -709,6 +776,11 @@ static void cameras_from_device_with_fd(const char *dev_path, const char *serial
 			// are there even any stepwise cameras out there?? who knows.
 			uint32_t frame_width = frmsize.type == V4L2_FRMSIZE_TYPE_DISCRETE ? frmsize.discrete.width : frmsize.stepwise.max_width;
 			uint32_t frame_height = frmsize.type == V4L2_FRMSIZE_TYPE_DISCRETE ? frmsize.discrete.height : frmsize.stepwise.max_height;
+			if (frame_width % 4 || frame_height % 4) {
+				// fucked up frame size
+				// (would probably break some YUV pixel formats)
+				continue;
+			}
 			arr_add(camera->formats, ((PictureFormat) {
 				.width = frame_width,
 				.height = frame_height,
diff --git a/main.c b/main.c
--- a/main.c
+++ b/main.c
@@ -384,6 +384,8 @@ void main() {\n\
 	const char *fshader_code = "in vec4 color;\n\
 in vec2 tex_coord;\n\
 uniform sampler2D u_sampler;\n\
+uniform sampler2D u_sampler2;\n\
+uniform sampler2D u_sampler3;\n\
 uniform int u_pixel_format;\n\
 uniform float u_flash;\n\
 uniform float u_opacity;\n\
@@ -404,7 +406,7 @@ void main() {\n\
 	case 0x47585858: // XXXGRAY (used for FPS display currently)\n\
 		color = vec3(texture2D(u_sampler, tex_coord).w);\n\
 		break;\n\
-	case 0x56595559: { // YUYV (YUV 4:2:2)\n\
+	case 0x56595559: { // YUYV 4:2:2 interleaved\n\
 		ivec2 texsize = textureSize(u_sampler, 0);\n\
 		vec2 tc = tex_coord * vec2(texsize);\n\
 		ivec2 tc00 = ivec2(tc);\n\
@@ -431,6 +433,13 @@ void main() {\n\
 		// technically we should check v4l2_pix_format::ycbcr_enc, but whatever.\n\
 		color = ycbcr_ITU_R_601_to_rgb(vec3(y,cbcr));\n\
 	} break;\n\
+	case 0x32315559: // YUV 4:2:0 with separate planes\n\
+	case 0x32315659: { // YVU 4:2:0 with separate planes (planes are reordered to YUV in camera.c)\n\
+		float y = texture2D(u_sampler, tex_coord).x;\n\
+		float cb = texture2D(u_sampler2, tex_coord).x;\n\
+		float cr = texture2D(u_sampler3, tex_coord).x;\n\
+		color = ycbcr_ITU_R_601_to_rgb(vec3(y,cb,cr));\n\
+	} break;\n\
 	default:\n\
 		color = texture2D(u_sampler, tex_coord).xyz;\n\
 		break;\n\
@@ -448,6 +457,8 @@ void main() {\n\
 	gl.GenBuffers(1, &vbo);
 	gl.GenVertexArrays(1, &vao);
 	const GLuint u_sampler = gl.GetUniformLocation(program, "u_sampler");
+	const GLuint u_sampler2 = gl.GetUniformLocation(program, "u_sampler2");
+	const GLuint u_sampler3 = gl.GetUniformLocation(program, "u_sampler3");
 	const GLuint u_offset = gl.GetUniformLocation(program, "u_offset");
 	const GLuint u_flash = gl.GetUniformLocation(program, "u_flash");
 	const GLuint u_pixel_format = gl.GetUniformLocation(program, "u_pixel_format");
@@ -828,8 +839,9 @@ void main() {\n\
 		last_time = curr_time;
 
 		gl.UseProgram(program);
-		gl.ActiveTexture(GL_TEXTURE0);
 		gl.Uniform1i(u_sampler, 0);
+		gl.Uniform1i(u_sampler2, 1);
+		gl.Uniform1i(u_sampler3, 2);
 		gl.Uniform1f(u_opacity, 1);
 		gl.Uniform2f(u_offset, 0, 0);
 		{
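
A note on the arithmetic used above: a WxH V4L2_PIX_FMT_YUV420 frame is a full-resolution Y plane (W*H bytes) followed by half-resolution Cb and Cr planes (W*H/4 bytes each), so the third plane starts at byte offset W*H*5/4 and a complete frame occupies W*H*3/2 bytes, which is exactly the frame_bytes_set threshold and the plane offsets used in curr_frame_rgb24() and camera_update_gl_textures(). V4L2_PIX_FMT_YVU420 is the same layout with Cr before Cb, which is why the CPU path swaps in_cb/in_cr and the GL path swaps textures[1]/textures[2]. The ycbcr_ITU_R_601_to_rgb() helper itself is defined elsewhere in camera.c and is not part of this diff; as a rough, hypothetical sketch (assuming full-range BT.601 with y, cb, cr already normalized to [0, 1]; the real helper may differ in range handling), the conversion looks something like:

#include <math.h>

// Hypothetical sketch, NOT the helper from camera.c: full-range ITU-R BT.601
// YCbCr -> RGB with inputs in [0, 1] and chroma centered at 0.5.
static void ycbcr_601_to_rgb_sketch(float y, float cb, float cr, float rgb[3]) {
	float u = cb - 0.5f, v = cr - 0.5f;
	rgb[0] = y + 1.402f * v;                     // R
	rgb[1] = y - 0.344136f * u - 0.714136f * v;  // G
	rgb[2] = y + 1.772f * u;                     // B
	for (int i = 0; i < 3; i++)                  // clamp to [0, 1]
		rgb[i] = fminf(fmaxf(rgb[i], 0.0f), 1.0f);
}

The GLSL ycbcr_ITU_R_601_to_rgb(vec3) called in main.c's fragment shader presumably applies the same math to the (y, cb, cr) triple sampled from the three GL_RED textures uploaded in camera_update_gl_textures().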