Compare commits

h264-v4l2m ... master

9 Commits

Author | SHA1 | Date |
---|---|---|
Jonas Letzbor | ce755cb1a3 | |
Attila Fidan | 115346f074 | |
Andri Yngvason | 0e93aa969f | |
Andri Yngvason | a77b99f2b4 | |
Andri Yngvason | 47e714b2bf | |
Andri Yngvason | 08d0c64ff9 | |
Andri Yngvason | 0bf53a4843 | |
Andri Yngvason | b043f004a8 | |
Alfred Wingate | d95b678d7a | |
@@ -8,3 +8,4 @@ build
 experiments
 subprojects
 sandbox
+.vscode
@@ -1 +1,2 @@
 github: any1
+patreon: andriyngvason
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2019 - 2020 Andri Yngvason
+ * Copyright (c) 2019 - 2024 Andri Yngvason
  *
  * Permission to use, copy, modify, and/or distribute this software for any
  * purpose with or without fee is hereby granted, provided that the above
@@ -109,6 +109,8 @@ struct nvnc_client {
 	uint32_t cursor_seq;
 	int quality;
 	bool formats_changed;
+	enum nvnc_keyboard_led_state led_state;
+	enum nvnc_keyboard_led_state pending_led_state;
 
 #ifdef HAVE_CRYPTO
 	struct crypto_key* apple_dh_secret;
@@ -129,6 +131,7 @@ enum nvnc__socket_type {
 	NVNC__SOCKET_TCP,
 	NVNC__SOCKET_UNIX,
 	NVNC__SOCKET_WEBSOCKET,
+	NVNC__SOCKET_FROM_FD,
 };
 
 struct nvnc {
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2021 - 2022 Andri Yngvason
+ * Copyright (c) 2021 - 2024 Andri Yngvason
  *
  * Permission to use, copy, modify, and/or distribute this software for any
  * purpose with or without fee is hereby granted, provided that the above
@@ -17,13 +17,28 @@

#include <stdint.h>
#include <unistd.h>
#include <stdbool.h>

struct h264_encoder;
struct nvnc_fb;
struct h264_encoder;

typedef void (*h264_encoder_packet_handler_fn)(const void* payload, size_t size,
		uint64_t pts, void* userdata);

struct h264_encoder_impl {
	struct h264_encoder* (*create)(uint32_t width, uint32_t height,
			uint32_t format, int quality);
	void (*destroy)(struct h264_encoder*);
	void (*feed)(struct h264_encoder*, struct nvnc_fb*);
};

struct h264_encoder {
	struct h264_encoder_impl *impl;
	h264_encoder_packet_handler_fn on_packet_ready;
	void* userdata;
	bool next_frame_should_be_keyframe;
};

struct h264_encoder* h264_encoder_create(uint32_t width, uint32_t height,
		uint32_t format, int quality);
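The new `h264_encoder_impl` vtable is what lets several encoder backends coexist behind the unchanged `h264_encoder_create()` entry point. The fragment below is only a minimal sketch of how a backend slots into this interface; the `h264_encoder_null_impl` name and its do-nothing behaviour are hypothetical and not part of this change.

```c
#include <stdlib.h>

#include "h264-encoder.h"

struct h264_encoder_null {
	struct h264_encoder base;
	uint32_t width, height, format;
};

struct h264_encoder_impl h264_encoder_null_impl;

static struct h264_encoder* null_create(uint32_t width, uint32_t height,
		uint32_t format, int quality)
{
	(void)quality;

	struct h264_encoder_null* self = calloc(1, sizeof(*self));
	if (!self)
		return NULL;

	/* The base struct carries the impl pointer plus the packet callback
	 * and userdata that the calling code fills in after creation. */
	self->base.impl = &h264_encoder_null_impl;
	self->base.next_frame_should_be_keyframe = true;
	self->width = width;
	self->height = height;
	self->format = format;
	return &self->base;
}

static void null_destroy(struct h264_encoder* base)
{
	/* base is the first member, so this frees the whole allocation. */
	free(base);
}

static void null_feed(struct h264_encoder* base, struct nvnc_fb* fb)
{
	(void)base;
	(void)fb;
	/* A real backend encodes fb and eventually calls:
	 * base->on_packet_ready(payload, size, pts, base->userdata); */
}

struct h264_encoder_impl h264_encoder_null_impl = {
	.create = null_create,
	.destroy = null_destroy,
	.feed = null_feed,
};
```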
@@ -86,6 +86,12 @@ enum nvnc_transform {
 	NVNC_TRANSFORM_FLIPPED_270 = 7,
 };
 
+enum nvnc_keyboard_led_state {
+	NVNC_KEYBOARD_LED_SCROLL_LOCK = 1 << 0,
+	NVNC_KEYBOARD_LED_NUM_LOCK = 1 << 1,
+	NVNC_KEYBOARD_LED_CAPS_LOCK = 1 << 2,
+};
+
 enum nvnc_log_level {
 	NVNC_LOG_PANIC = 0,
 	NVNC_LOG_ERROR = 1,

@@ -132,6 +138,7 @@ extern const char nvnc_version[];
 struct nvnc* nvnc_open(const char* addr, uint16_t port);
 struct nvnc* nvnc_open_unix(const char *addr);
 struct nvnc* nvnc_open_websocket(const char* addr, uint16_t port);
+struct nvnc* nvnc_open_from_fd(int fd);
 void nvnc_close(struct nvnc* self);
 
 void nvnc_add_display(struct nvnc*, struct nvnc_display*);

@@ -150,6 +157,9 @@ struct nvnc_client* nvnc_client_first(struct nvnc* self);
 struct nvnc_client* nvnc_client_next(struct nvnc_client* client);
 void nvnc_client_close(struct nvnc_client* client);
 
+void nvnc_client_set_led_state(struct nvnc_client*,
+		enum nvnc_keyboard_led_state);
+
 void nvnc_set_name(struct nvnc* self, const char* name);
 
 void nvnc_set_key_fn(struct nvnc* self, nvnc_key_fn);
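The keyboard LED additions are purely additive API: a server that already tracks lock-key state can forward it to every connected client with `nvnc_client_set_led_state()`. A short usage sketch, assuming the caller has a `struct nvnc*` instance and its own lock-key booleans (the helper name is hypothetical):

```c
#include <stdbool.h>

#include "neatvnc.h"

/* Push the server's current lock-key state to all connected clients. The
 * caps/num/scroll flags are assumed to come from the caller's own keyboard
 * tracking; they are not part of this change. */
static void push_led_state(struct nvnc* server, bool caps, bool num, bool scroll)
{
	enum nvnc_keyboard_led_state state = 0;
	if (scroll)
		state |= NVNC_KEYBOARD_LED_SCROLL_LOCK;
	if (num)
		state |= NVNC_KEYBOARD_LED_NUM_LOCK;
	if (caps)
		state |= NVNC_KEYBOARD_LED_CAPS_LOCK;

	for (struct nvnc_client* client = nvnc_client_first(server); client;
			client = nvnc_client_next(client))
		nvnc_client_set_led_state(client, state);
}
```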
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2019 - 2022 Andri Yngvason
+ * Copyright (c) 2019 - 2024 Andri Yngvason
  *
  * Permission to use, copy, modify, and/or distribute this software for any
  * purpose with or without fee is hereby granted, provided that the above

@@ -69,9 +69,11 @@ enum rfb_encodings {
 	RFB_ENCODING_CURSOR = -239,
 	RFB_ENCODING_DESKTOPSIZE = -223,
 	RFB_ENCODING_QEMU_EXT_KEY_EVENT = -258,
+	RFB_ENCODING_QEMU_LED_STATE = -261,
 	RFB_ENCODING_EXTENDEDDESKTOPSIZE = -308,
 	RFB_ENCODING_PTS = -1000,
 	RFB_ENCODING_NTP = -1001,
+	RFB_ENCODING_VMWARE_LED_STATE = 0x574d5668,
 };
 
 #define RFB_ENCODING_JPEG_HIGHQ -23

@@ -114,6 +116,13 @@ enum rfb_rsa_aes_cred_subtype {
 	RFB_RSA_AES_CRED_SUBTYPE_ONLY_PASS = 2,
 };
 
+// This is the same for both qemu and vmware extensions
+enum rfb_led_state {
+	RFB_LED_STATE_SCROLL_LOCK = 1 << 0,
+	RFB_LED_STATE_NUM_LOCK = 1 << 1,
+	RFB_LED_STATE_CAPS_LOCK = 1 << 2,
+};
+
 struct rfb_security_types_msg {
 	uint8_t n;
 	uint8_t types[0];
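The public `nvnc_keyboard_led_state` bits and the protocol-level `rfb_led_state` bits are defined with the same layout, which presumably allows the LED value to be passed straight through to the wire without translation. A compile-time check along these lines (not part of the change, and the protocol header name is assumed here) would document that invariant:

```c
#include <assert.h>

#include "neatvnc.h"
#include "rfb-proto.h" /* assumed name of the header declaring rfb_led_state */

/* These only assert what the two headers already establish: identical bit
 * positions for scroll, num and caps lock. */
static_assert(NVNC_KEYBOARD_LED_SCROLL_LOCK == RFB_LED_STATE_SCROLL_LOCK,
		"scroll lock bit must match");
static_assert(NVNC_KEYBOARD_LED_NUM_LOCK == RFB_LED_STATE_NUM_LOCK,
		"num lock bit must match");
static_assert(NVNC_KEYBOARD_LED_CAPS_LOCK == RFB_LED_STATE_CAPS_LOCK,
		"caps lock bit must match");
```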
meson.build (19 lines changed)

@@ -138,13 +138,26 @@ if gbm.found()
 	config.set('HAVE_GBM', true)
 endif
 
-if gbm.found() and libdrm.found() and libavcodec.found() and libavfilter.found() and libavutil.found()
-	sources += [ 'src/h264-encoder.c', 'src/open-h264.c' ]
+have_ffmpeg = gbm.found() and libdrm.found() and libavcodec.found() and libavfilter.found() and libavutil.found()
+have_v4l2 = gbm.found() and libdrm.found() and cc.check_header('linux/videodev2.h')
+
+if have_ffmpeg
+	sources += [ 'src/h264-encoder-ffmpeg-impl.c' ]
 	dependencies += [libdrm, libavcodec, libavfilter, libavutil]
-	config.set('ENABLE_OPEN_H264', true)
+	config.set('HAVE_FFMPEG', true)
 	config.set('HAVE_LIBAVUTIL', true)
 endif
 
+if have_v4l2
+	sources += [ 'src/h264-encoder-v4l2m2m-impl.c' ]
+	config.set('HAVE_V4L2', true)
+endif
+
+if have_ffmpeg or have_v4l2
+	sources += [ 'src/h264-encoder.c', 'src/open-h264.c' ]
+	config.set('ENABLE_OPEN_H264', true)
+endif
+
 if enable_websocket
 	sources += [
 		'src/ws-handshake.c',
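With the build split into `have_ffmpeg` and `have_v4l2`, the shared front end in `src/h264-encoder.c` must pick a backend at runtime based on the new `HAVE_FFMPEG`/`HAVE_V4L2` config flags. The compare view only shows the extern impl declarations from that file near the end, so the following is just a sketch of how such a selection could look, not the actual dispatch logic:

```c
#include "config.h"
#include "h264-encoder.h"

#include <stddef.h>

#ifdef HAVE_FFMPEG
extern struct h264_encoder_impl h264_encoder_ffmpeg_impl;
#endif
#ifdef HAVE_V4L2
extern struct h264_encoder_impl h264_encoder_v4l2m2m_impl;
#endif

/* Sketch only: try the ffmpeg/VAAPI backend first and fall back to the
 * v4l2m2m stateful encoder. The real selection in src/h264-encoder.c may
 * differ. */
struct h264_encoder* h264_encoder_create(uint32_t width, uint32_t height,
		uint32_t format, int quality)
{
	struct h264_encoder* encoder = NULL;
#ifdef HAVE_FFMPEG
	encoder = h264_encoder_ffmpeg_impl.create(width, height, format,
			quality);
#endif
#ifdef HAVE_V4L2
	if (!encoder)
		encoder = h264_encoder_v4l2m2m_impl.create(width, height,
				format, quality);
#endif
	return encoder;
}
```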
@ -0,0 +1,627 @@
|
|||
/*
|
||||
* Copyright (c) 2021 - 2024 Andri Yngvason
|
||||
*
|
||||
* Permission to use, copy, modify, and/or distribute this software for any
|
||||
* purpose with or without fee is hereby granted, provided that the above
|
||||
* copyright notice and this permission notice appear in all copies.
|
||||
*
|
||||
* THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES WITH
|
||||
* REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY
|
||||
* AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT,
|
||||
* INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
|
||||
* LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE
|
||||
* OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
|
||||
* PERFORMANCE OF THIS SOFTWARE.
|
||||
*/
|
||||
|
||||
#include "h264-encoder.h"
|
||||
#include "neatvnc.h"
|
||||
#include "fb.h"
|
||||
#include "sys/queue.h"
|
||||
#include "vec.h"
|
||||
#include "usdt.h"
|
||||
|
||||
#include <stdlib.h>
|
||||
#include <stdint.h>
|
||||
#include <stdbool.h>
|
||||
#include <unistd.h>
|
||||
#include <assert.h>
#include <string.h>
|
||||
#include <gbm.h>
|
||||
#include <xf86drm.h>
|
||||
#include <aml.h>
|
||||
|
||||
#include <libavcodec/avcodec.h>
|
||||
#include <libavutil/hwcontext.h>
|
||||
#include <libavutil/hwcontext_drm.h>
|
||||
#include <libavutil/pixdesc.h>
|
||||
#include <libavutil/dict.h>
|
||||
#include <libavfilter/avfilter.h>
|
||||
#include <libavfilter/buffersink.h>
|
||||
#include <libavfilter/buffersrc.h>
|
||||
|
||||
#include <libdrm/drm_fourcc.h>
|
||||
|
||||
struct h264_encoder;
|
||||
|
||||
struct fb_queue_entry {
|
||||
struct nvnc_fb* fb;
|
||||
TAILQ_ENTRY(fb_queue_entry) link;
|
||||
};
|
||||
|
||||
TAILQ_HEAD(fb_queue, fb_queue_entry);
|
||||
|
||||
struct h264_encoder_ffmpeg {
|
||||
struct h264_encoder base;
|
||||
|
||||
uint32_t width;
|
||||
uint32_t height;
|
||||
uint32_t format;
|
||||
|
||||
AVRational timebase;
|
||||
AVRational sample_aspect_ratio;
|
||||
enum AVPixelFormat av_pixel_format;
|
||||
|
||||
/* type: AVHWDeviceContext */
|
||||
AVBufferRef* hw_device_ctx;
|
||||
|
||||
/* type: AVHWFramesContext */
|
||||
AVBufferRef* hw_frames_ctx;
|
||||
|
||||
AVCodecContext* codec_ctx;
|
||||
|
||||
AVFilterGraph* filter_graph;
|
||||
AVFilterContext* filter_in;
|
||||
AVFilterContext* filter_out;
|
||||
|
||||
struct fb_queue fb_queue;
|
||||
|
||||
struct aml_work* work;
|
||||
struct nvnc_fb* current_fb;
|
||||
struct vec current_packet;
|
||||
bool current_frame_is_keyframe;
|
||||
|
||||
bool please_destroy;
|
||||
};
|
||||
|
||||
struct h264_encoder_impl h264_encoder_ffmpeg_impl;
|
||||
|
||||
static enum AVPixelFormat drm_to_av_pixel_format(uint32_t format)
|
||||
{
|
||||
switch (format) {
|
||||
case DRM_FORMAT_XRGB8888:
|
||||
case DRM_FORMAT_ARGB8888:
|
||||
return AV_PIX_FMT_BGR0;
|
||||
case DRM_FORMAT_XBGR8888:
|
||||
case DRM_FORMAT_ABGR8888:
|
||||
return AV_PIX_FMT_RGB0;
|
||||
case DRM_FORMAT_RGBX8888:
|
||||
case DRM_FORMAT_RGBA8888:
|
||||
return AV_PIX_FMT_0BGR;
|
||||
case DRM_FORMAT_BGRX8888:
|
||||
case DRM_FORMAT_BGRA8888:
|
||||
return AV_PIX_FMT_0RGB;
|
||||
}
|
||||
|
||||
return AV_PIX_FMT_NONE;
|
||||
}
|
||||
|
||||
static void hw_frame_desc_free(void* opaque, uint8_t* data)
|
||||
{
|
||||
struct AVDRMFrameDescriptor* desc = (void*)data;
|
||||
assert(desc);
|
||||
|
||||
for (int i = 0; i < desc->nb_objects; ++i)
|
||||
close(desc->objects[i].fd);
|
||||
|
||||
free(desc);
|
||||
}
|
||||
|
||||
// TODO: Maybe do this once per frame inside nvnc_fb?
|
||||
static AVFrame* fb_to_avframe(struct nvnc_fb* fb)
|
||||
{
|
||||
struct gbm_bo* bo = fb->bo;
|
||||
|
||||
int n_planes = gbm_bo_get_plane_count(bo);
|
||||
|
||||
AVDRMFrameDescriptor* desc = calloc(1, sizeof(*desc));
|
||||
desc->nb_objects = n_planes;
|
||||
|
||||
desc->nb_layers = 1;
|
||||
desc->layers[0].format = gbm_bo_get_format(bo);
|
||||
desc->layers[0].nb_planes = n_planes;
|
||||
|
||||
for (int i = 0; i < n_planes; ++i) {
|
||||
uint32_t stride = gbm_bo_get_stride_for_plane(bo, i);
|
||||
|
||||
desc->objects[i].fd = gbm_bo_get_fd_for_plane(bo, i);
|
||||
desc->objects[i].size = stride * fb->height;
|
||||
desc->objects[i].format_modifier = gbm_bo_get_modifier(bo);
|
||||
|
||||
desc->layers[0].format = gbm_bo_get_format(bo);
|
||||
desc->layers[0].planes[i].object_index = i;
|
||||
desc->layers[0].planes[i].offset = gbm_bo_get_offset(bo, i);
|
||||
desc->layers[0].planes[i].pitch = stride;
|
||||
}
|
||||
|
||||
AVFrame* frame = av_frame_alloc();
|
||||
if (!frame) {
|
||||
hw_frame_desc_free(NULL, (void*)desc);
|
||||
return NULL;
|
||||
}
|
||||
|
||||
frame->opaque = fb;
|
||||
frame->width = fb->width;
|
||||
frame->height = fb->height;
|
||||
frame->format = AV_PIX_FMT_DRM_PRIME;
|
||||
frame->sample_aspect_ratio = (AVRational){1, 1};
|
||||
|
||||
AVBufferRef* desc_ref = av_buffer_create((void*)desc, sizeof(*desc),
|
||||
hw_frame_desc_free, NULL, 0);
|
||||
if (!desc_ref) {
|
||||
hw_frame_desc_free(NULL, (void*)desc);
|
||||
av_frame_free(&frame);
|
||||
return NULL;
|
||||
}
|
||||
|
||||
frame->buf[0] = desc_ref;
|
||||
frame->data[0] = (void*)desc_ref->data;
|
||||
|
||||
// TODO: Set colorspace?
|
||||
|
||||
return frame;
|
||||
}
|
||||
|
||||
static struct nvnc_fb* fb_queue_dequeue(struct fb_queue* queue)
|
||||
{
|
||||
if (TAILQ_EMPTY(queue))
|
||||
return NULL;
|
||||
|
||||
struct fb_queue_entry* entry = TAILQ_FIRST(queue);
|
||||
TAILQ_REMOVE(queue, entry, link);
|
||||
struct nvnc_fb* fb = entry->fb;
|
||||
free(entry);
|
||||
|
||||
return fb;
|
||||
}
|
||||
|
||||
static int fb_queue_enqueue(struct fb_queue* queue, struct nvnc_fb* fb)
|
||||
{
|
||||
struct fb_queue_entry* entry = calloc(1, sizeof(*entry));
|
||||
if (!entry)
|
||||
return -1;
|
||||
|
||||
entry->fb = fb;
|
||||
nvnc_fb_ref(fb);
|
||||
TAILQ_INSERT_TAIL(queue, entry, link);
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
static int h264_encoder__init_buffersrc(struct h264_encoder_ffmpeg* self)
|
||||
{
|
||||
int rc;
|
||||
|
||||
/* Placeholder values are used to pacify input checking and the real
|
||||
* values are set below.
|
||||
*/
|
||||
rc = avfilter_graph_create_filter(&self->filter_in,
|
||||
avfilter_get_by_name("buffer"), "in",
|
||||
"width=1:height=1:pix_fmt=drm_prime:time_base=1/1", NULL,
|
||||
self->filter_graph);
|
||||
if (rc != 0)
|
||||
return -1;
|
||||
|
||||
AVBufferSrcParameters *params = av_buffersrc_parameters_alloc();
|
||||
if (!params)
|
||||
return -1;
|
||||
|
||||
params->format = AV_PIX_FMT_DRM_PRIME;
|
||||
params->width = self->width;
|
||||
params->height = self->height;
|
||||
params->sample_aspect_ratio = self->sample_aspect_ratio;
|
||||
params->time_base = self->timebase;
|
||||
params->hw_frames_ctx = self->hw_frames_ctx;
|
||||
|
||||
rc = av_buffersrc_parameters_set(self->filter_in, params);
|
||||
assert(rc == 0);
|
||||
|
||||
av_free(params);
|
||||
return 0;
|
||||
}
|
||||
|
||||
static int h264_encoder__init_filters(struct h264_encoder_ffmpeg* self)
|
||||
{
|
||||
int rc;
|
||||
|
||||
self->filter_graph = avfilter_graph_alloc();
|
||||
if (!self->filter_graph)
|
||||
return -1;
|
||||
|
||||
rc = h264_encoder__init_buffersrc(self);
|
||||
if (rc != 0)
|
||||
goto failure;
|
||||
|
||||
rc = avfilter_graph_create_filter(&self->filter_out,
|
||||
avfilter_get_by_name("buffersink"), "out", NULL,
|
||||
NULL, self->filter_graph);
|
||||
if (rc != 0)
|
||||
goto failure;
|
||||
|
||||
AVFilterInOut* inputs = avfilter_inout_alloc();
|
||||
if (!inputs)
|
||||
goto failure;
|
||||
|
||||
inputs->name = av_strdup("in");
|
||||
inputs->filter_ctx = self->filter_in;
|
||||
inputs->pad_idx = 0;
|
||||
inputs->next = NULL;
|
||||
|
||||
AVFilterInOut* outputs = avfilter_inout_alloc();
|
||||
if (!outputs) {
|
||||
avfilter_inout_free(&inputs);
|
||||
goto failure;
|
||||
}
|
||||
|
||||
outputs->name = av_strdup("out");
|
||||
outputs->filter_ctx = self->filter_out;
|
||||
outputs->pad_idx = 0;
|
||||
outputs->next = NULL;
|
||||
|
||||
rc = avfilter_graph_parse(self->filter_graph,
|
||||
"hwmap=mode=direct:derive_device=vaapi"
|
||||
",scale_vaapi=format=nv12:mode=fast",
|
||||
outputs, inputs, NULL);
|
||||
if (rc != 0)
|
||||
goto failure;
|
||||
|
||||
assert(self->hw_device_ctx);
|
||||
|
||||
for (unsigned int i = 0; i < self->filter_graph->nb_filters; ++i) {
|
||||
self->filter_graph->filters[i]->hw_device_ctx =
|
||||
av_buffer_ref(self->hw_device_ctx);
|
||||
}
|
||||
|
||||
rc = avfilter_graph_config(self->filter_graph, NULL);
|
||||
if (rc != 0)
|
||||
goto failure;
|
||||
|
||||
return 0;
|
||||
|
||||
failure:
|
||||
avfilter_graph_free(&self->filter_graph);
|
||||
return -1;
|
||||
}
|
||||
|
||||
static int h264_encoder__init_codec_context(struct h264_encoder_ffmpeg* self,
|
||||
const AVCodec* codec, int quality)
|
||||
{
|
||||
self->codec_ctx = avcodec_alloc_context3(codec);
|
||||
if (!self->codec_ctx)
|
||||
return -1;
|
||||
|
||||
struct AVCodecContext* c = self->codec_ctx;
|
||||
c->width = self->width;
|
||||
c->height = self->height;
|
||||
c->time_base = self->timebase;
|
||||
c->sample_aspect_ratio = self->sample_aspect_ratio;
|
||||
c->pix_fmt = AV_PIX_FMT_VAAPI;
|
||||
c->gop_size = INT32_MAX; /* We'll select key frames manually */
|
||||
c->max_b_frames = 0; /* B-frames are bad for latency */
|
||||
c->global_quality = quality;
|
||||
|
||||
/* open-h264 requires the baseline profile, so constrained baseline
 * (AV_PROFILE_H264_BASELINE) would be the natural choice, but that is
 * not supported by many clients, so we use the main profile instead.
 */
c->profile = AV_PROFILE_H264_MAIN;
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
static int h264_encoder__init_hw_frames_context(struct h264_encoder_ffmpeg* self)
|
||||
{
|
||||
self->hw_frames_ctx = av_hwframe_ctx_alloc(self->hw_device_ctx);
|
||||
if (!self->hw_frames_ctx)
|
||||
return -1;
|
||||
|
||||
AVHWFramesContext* c = (AVHWFramesContext*)self->hw_frames_ctx->data;
|
||||
c->format = AV_PIX_FMT_DRM_PRIME;
|
||||
c->sw_format = drm_to_av_pixel_format(self->format);
|
||||
c->width = self->width;
|
||||
c->height = self->height;
|
||||
|
||||
if (av_hwframe_ctx_init(self->hw_frames_ctx) < 0)
|
||||
av_buffer_unref(&self->hw_frames_ctx);
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
static int h264_encoder__schedule_work(struct h264_encoder_ffmpeg* self)
|
||||
{
|
||||
if (self->current_fb)
|
||||
return 0;
|
||||
|
||||
self->current_fb = fb_queue_dequeue(&self->fb_queue);
|
||||
if (!self->current_fb)
|
||||
return 0;
|
||||
|
||||
DTRACE_PROBE1(neatvnc, h264_encode_frame_begin, self->current_fb->pts);
|
||||
|
||||
self->current_frame_is_keyframe = self->base.next_frame_should_be_keyframe;
|
||||
self->base.next_frame_should_be_keyframe = false;
|
||||
|
||||
return aml_start(aml_get_default(), self->work);
|
||||
}
|
||||
|
||||
static int h264_encoder__encode(struct h264_encoder_ffmpeg* self,
|
||||
AVFrame* frame_in)
|
||||
{
|
||||
int rc;
|
||||
|
||||
rc = av_buffersrc_add_frame_flags(self->filter_in, frame_in,
|
||||
AV_BUFFERSRC_FLAG_KEEP_REF);
|
||||
if (rc != 0)
|
||||
return -1;
|
||||
|
||||
AVFrame* filtered_frame = av_frame_alloc();
|
||||
if (!filtered_frame)
|
||||
return -1;
|
||||
|
||||
rc = av_buffersink_get_frame(self->filter_out, filtered_frame);
|
||||
if (rc != 0)
|
||||
goto get_frame_failure;
|
||||
|
||||
rc = avcodec_send_frame(self->codec_ctx, filtered_frame);
|
||||
if (rc != 0)
|
||||
goto send_frame_failure;
|
||||
|
||||
AVPacket* packet = av_packet_alloc();
|
||||
assert(packet); // TODO
|
||||
|
||||
while (1) {
|
||||
rc = avcodec_receive_packet(self->codec_ctx, packet);
|
||||
if (rc != 0)
|
||||
break;
|
||||
|
||||
vec_append(&self->current_packet, packet->data, packet->size);
|
||||
|
||||
packet->stream_index = 0;
|
||||
av_packet_unref(packet);
|
||||
}
|
||||
|
||||
// Frame should always start with a zero:
|
||||
assert(self->current_packet.len == 0 ||
|
||||
((char*)self->current_packet.data)[0] == 0);
|
||||
|
||||
av_packet_free(&packet);
|
||||
send_frame_failure:
|
||||
av_frame_unref(filtered_frame);
|
||||
get_frame_failure:
|
||||
av_frame_free(&filtered_frame);
|
||||
return rc == AVERROR(EAGAIN) ? 0 : rc;
|
||||
}
|
||||
|
||||
static void h264_encoder__do_work(void* handle)
|
||||
{
|
||||
struct h264_encoder_ffmpeg* self = aml_get_userdata(handle);
|
||||
|
||||
AVFrame* frame = fb_to_avframe(self->current_fb);
|
||||
assert(frame); // TODO
|
||||
|
||||
frame->hw_frames_ctx = av_buffer_ref(self->hw_frames_ctx);
|
||||
|
||||
if (self->current_frame_is_keyframe) {
|
||||
#if LIBAVUTIL_VERSION_INT >= AV_VERSION_INT(58, 7, 100)
|
||||
frame->flags |= AV_FRAME_FLAG_KEY;
|
||||
#else
|
||||
frame->key_frame = 1;
|
||||
#endif
|
||||
frame->pict_type = AV_PICTURE_TYPE_I;
|
||||
} else {
|
||||
#if LIBAVUTIL_VERSION_INT >= AV_VERSION_INT(58, 7, 100)
|
||||
frame->flags &= ~AV_FRAME_FLAG_KEY;
|
||||
#else
|
||||
frame->key_frame = 0;
|
||||
#endif
|
||||
frame->pict_type = AV_PICTURE_TYPE_P;
|
||||
}
|
||||
|
||||
int rc = h264_encoder__encode(self, frame);
|
||||
if (rc != 0) {
|
||||
char err[256];
|
||||
av_strerror(rc, err, sizeof(err));
|
||||
nvnc_log(NVNC_LOG_ERROR, "Failed to encode packet: %s", err);
|
||||
goto failure;
|
||||
}
|
||||
|
||||
failure:
|
||||
av_frame_unref(frame);
|
||||
av_frame_free(&frame);
|
||||
}
|
||||
|
||||
static void h264_encoder__on_work_done(void* handle)
|
||||
{
|
||||
struct h264_encoder_ffmpeg* self = aml_get_userdata(handle);
|
||||
|
||||
uint64_t pts = nvnc_fb_get_pts(self->current_fb);
|
||||
nvnc_fb_release(self->current_fb);
|
||||
nvnc_fb_unref(self->current_fb);
|
||||
self->current_fb = NULL;
|
||||
|
||||
DTRACE_PROBE1(neatvnc, h264_encode_frame_end, pts);
|
||||
|
||||
if (self->please_destroy) {
|
||||
vec_destroy(&self->current_packet);
|
||||
h264_encoder_destroy(&self->base);
|
||||
return;
|
||||
}
|
||||
|
||||
if (self->current_packet.len == 0) {
|
||||
nvnc_log(NVNC_LOG_WARNING, "Whoops, encoded packet length is 0");
|
||||
return;
|
||||
}
|
||||
|
||||
void* userdata = self->base.userdata;
|
||||
|
||||
// Must make a copy of packet because the callback might destroy the
|
||||
// encoder object.
|
||||
struct vec packet;
|
||||
vec_init(&packet, self->current_packet.len);
|
||||
vec_append(&packet, self->current_packet.data,
|
||||
self->current_packet.len);
|
||||
|
||||
vec_clear(&self->current_packet);
|
||||
h264_encoder__schedule_work(self);
|
||||
|
||||
self->base.on_packet_ready(packet.data, packet.len, pts, userdata);
|
||||
vec_destroy(&packet);
|
||||
}
|
||||
|
||||
static int find_render_node(char *node, size_t maxlen) {
|
||||
int r = -1;
|
||||
drmDevice *devices[64];
|
||||
|
||||
int n = drmGetDevices2(0, devices, sizeof(devices) / sizeof(devices[0]));
|
||||
for (int i = 0; i < n; ++i) {
|
||||
drmDevice *dev = devices[i];
|
||||
if (!(dev->available_nodes & (1 << DRM_NODE_RENDER)))
|
||||
continue;
|
||||
|
||||
strncpy(node, dev->nodes[DRM_NODE_RENDER], maxlen);
|
||||
node[maxlen - 1] = '\0';
|
||||
r = 0;
|
||||
break;
|
||||
}
|
||||
|
||||
drmFreeDevices(devices, n);
|
||||
return r;
|
||||
}
|
||||
|
||||
static struct h264_encoder* h264_encoder_ffmpeg_create(uint32_t width,
|
||||
uint32_t height, uint32_t format, int quality)
|
||||
{
|
||||
int rc;
|
||||
|
||||
struct h264_encoder_ffmpeg* self = calloc(1, sizeof(*self));
|
||||
if (!self)
|
||||
return NULL;
|
||||
|
||||
self->base.impl = &h264_encoder_ffmpeg_impl;
|
||||
|
||||
if (vec_init(&self->current_packet, 65536) < 0)
|
||||
goto packet_failure;
|
||||
|
||||
self->work = aml_work_new(h264_encoder__do_work,
|
||||
h264_encoder__on_work_done, self, NULL);
|
||||
if (!self->work)
|
||||
goto worker_failure;
|
||||
|
||||
char render_node[64];
|
||||
if (find_render_node(render_node, sizeof(render_node)) < 0)
|
||||
goto render_node_failure;
|
||||
|
||||
rc = av_hwdevice_ctx_create(&self->hw_device_ctx,
|
||||
AV_HWDEVICE_TYPE_DRM, render_node, NULL, 0);
|
||||
if (rc != 0)
|
||||
goto hwdevice_ctx_failure;
|
||||
|
||||
self->base.next_frame_should_be_keyframe = true;
|
||||
TAILQ_INIT(&self->fb_queue);
|
||||
|
||||
self->width = width;
|
||||
self->height = height;
|
||||
self->format = format;
|
||||
self->timebase = (AVRational){1, 1000000};
|
||||
self->sample_aspect_ratio = (AVRational){1, 1};
|
||||
self->av_pixel_format = drm_to_av_pixel_format(format);
|
||||
if (self->av_pixel_format == AV_PIX_FMT_NONE)
|
||||
goto pix_fmt_failure;
|
||||
|
||||
const AVCodec* codec = avcodec_find_encoder_by_name("h264_vaapi");
|
||||
if (!codec)
|
||||
goto codec_failure;
|
||||
|
||||
if (h264_encoder__init_hw_frames_context(self) < 0)
|
||||
goto hw_frames_context_failure;
|
||||
|
||||
if (h264_encoder__init_filters(self) < 0)
|
||||
goto filter_failure;
|
||||
|
||||
if (h264_encoder__init_codec_context(self, codec, quality) < 0)
|
||||
goto codec_context_failure;
|
||||
|
||||
self->codec_ctx->hw_frames_ctx =
|
||||
av_buffer_ref(self->filter_out->inputs[0]->hw_frames_ctx);
|
||||
|
||||
AVDictionary *opts = NULL;
|
||||
av_dict_set_int(&opts, "async_depth", 1, 0);
|
||||
|
||||
rc = avcodec_open2(self->codec_ctx, codec, &opts);
|
||||
av_dict_free(&opts);
|
||||
|
||||
if (rc != 0)
|
||||
goto avcodec_open_failure;
|
||||
|
||||
return &self->base;
|
||||
|
||||
avcodec_open_failure:
|
||||
avcodec_free_context(&self->codec_ctx);
|
||||
codec_context_failure:
|
||||
filter_failure:
|
||||
av_buffer_unref(&self->hw_frames_ctx);
|
||||
hw_frames_context_failure:
|
||||
codec_failure:
|
||||
pix_fmt_failure:
|
||||
av_buffer_unref(&self->hw_device_ctx);
|
||||
hwdevice_ctx_failure:
|
||||
render_node_failure:
|
||||
aml_unref(self->work);
|
||||
worker_failure:
|
||||
vec_destroy(&self->current_packet);
|
||||
packet_failure:
|
||||
free(self);
|
||||
return NULL;
|
||||
}
|
||||
|
||||
static void h264_encoder_ffmpeg_destroy(struct h264_encoder* base)
|
||||
{
|
||||
struct h264_encoder_ffmpeg* self = (struct h264_encoder_ffmpeg*)base;
|
||||
|
||||
if (self->current_fb) {
|
||||
self->please_destroy = true;
|
||||
return;
|
||||
}
|
||||
|
||||
vec_destroy(&self->current_packet);
|
||||
av_buffer_unref(&self->hw_frames_ctx);
|
||||
avcodec_free_context(&self->codec_ctx);
|
||||
av_buffer_unref(&self->hw_device_ctx);
|
||||
avfilter_graph_free(&self->filter_graph);
|
||||
aml_unref(self->work);
|
||||
free(self);
|
||||
}
|
||||
|
||||
static void h264_encoder_ffmpeg_feed(struct h264_encoder* base,
|
||||
struct nvnc_fb* fb)
|
||||
{
|
||||
struct h264_encoder_ffmpeg* self = (struct h264_encoder_ffmpeg*)base;
|
||||
assert(fb->type == NVNC_FB_GBM_BO);
|
||||
|
||||
// TODO: Add transform filter
|
||||
assert(fb->transform == NVNC_TRANSFORM_NORMAL);
|
||||
|
||||
int rc = fb_queue_enqueue(&self->fb_queue, fb);
|
||||
assert(rc == 0); // TODO
|
||||
|
||||
nvnc_fb_hold(fb);
|
||||
|
||||
rc = h264_encoder__schedule_work(self);
|
||||
assert(rc == 0); // TODO
|
||||
}
|
||||
|
||||
struct h264_encoder_impl h264_encoder_ffmpeg_impl = {
|
||||
.create = h264_encoder_ffmpeg_create,
|
||||
.destroy = h264_encoder_ffmpeg_destroy,
|
||||
.feed = h264_encoder_ffmpeg_feed,
|
||||
};
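The ffmpeg backend above does its work asynchronously on an aml worker and hands each finished packet to `on_packet_ready`. The owning code is not part of this file, so the fragment below is only a sketch of the calling pattern implied by `h264-encoder.h` (the callback and userdata live directly in the public base struct); the `my_stream` names are hypothetical, and in the real tree this role presumably belongs to `src/open-h264.c`:

```c
#include "h264-encoder.h"
#include "neatvnc.h"

struct my_stream {
	struct h264_encoder* encoder;
};

static void on_h264_packet(const void* payload, size_t size, uint64_t pts,
		void* userdata)
{
	struct my_stream* stream = userdata;
	(void)stream;
	/* Forward the encoded packet to the client, using pts (µs) for
	 * pacing. */
	(void)payload; (void)size; (void)pts;
}

static int my_stream_init(struct my_stream* stream, uint32_t width,
		uint32_t height, uint32_t drm_format, int quality)
{
	stream->encoder = h264_encoder_create(width, height, drm_format,
			quality);
	if (!stream->encoder)
		return -1;

	stream->encoder->on_packet_ready = on_h264_packet;
	stream->encoder->userdata = stream;
	return 0;
}

static void my_stream_feed(struct my_stream* stream, struct nvnc_fb* fb)
{
	/* fb must be a GBM-backed nvnc_fb; the backends assert this. */
	stream->encoder->impl->feed(stream->encoder, fb);
}
```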
@ -0,0 +1,741 @@
|
|||
/*
|
||||
* Copyright (c) 2024 Andri Yngvason
|
||||
*
|
||||
* Permission to use, copy, modify, and/or distribute this software for any
|
||||
* purpose with or without fee is hereby granted, provided that the above
|
||||
* copyright notice and this permission notice appear in all copies.
|
||||
*
|
||||
* THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES WITH
|
||||
* REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY
|
||||
* AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT,
|
||||
* INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
|
||||
* LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE
|
||||
* OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
|
||||
* PERFORMANCE OF THIS SOFTWARE.
|
||||
*/
|
||||
|
||||
#include "h264-encoder.h"
|
||||
#include "neatvnc.h"
|
||||
#include "fb.h"
|
||||
#include "pixels.h"
|
||||
|
||||
#include <assert.h>
|
||||
#include <string.h>
|
||||
#include <stdio.h>
|
||||
#include <inttypes.h>
|
||||
#include <fcntl.h>
|
||||
#include <sys/mman.h>
|
||||
#include <sys/ioctl.h>
|
||||
#include <linux/videodev2.h>
|
||||
#include <drm_fourcc.h>
|
||||
#include <gbm.h>
|
||||
#include <aml.h>
|
||||
#include <dirent.h>
#include <limits.h>
#include <stdbool.h>
#include <stdlib.h>
#include <unistd.h>
|
||||
|
||||
#define UDIV_UP(a, b) (((a) + (b) - 1) / (b))
|
||||
#define ALIGN_UP(a, b) ((b) * UDIV_UP((a), (b)))
|
||||
#define ARRAY_LENGTH(a) (sizeof(a) / sizeof((a)[0]))
|
||||
|
||||
#define N_SRC_BUFS 3
|
||||
#define N_DST_BUFS 3
|
||||
|
||||
struct h264_encoder_v4l2m2m_dst_buf {
|
||||
struct v4l2_buffer buffer;
|
||||
struct v4l2_plane plane;
|
||||
void* payload;
|
||||
};
|
||||
|
||||
struct h264_encoder_v4l2m2m_src_buf {
|
||||
struct v4l2_buffer buffer;
|
||||
struct v4l2_plane planes[4];
|
||||
int fd;
|
||||
bool is_taken;
|
||||
struct nvnc_fb* fb;
|
||||
};
|
||||
|
||||
struct h264_encoder_v4l2m2m {
|
||||
struct h264_encoder base;
|
||||
|
||||
uint32_t width;
|
||||
uint32_t height;
|
||||
uint32_t format;
|
||||
int quality; // TODO: Can we affect the quality?
|
||||
|
||||
char driver[16];
|
||||
|
||||
int fd;
|
||||
struct aml_handler* handler;
|
||||
|
||||
struct h264_encoder_v4l2m2m_src_buf src_bufs[N_SRC_BUFS];
|
||||
int src_buf_index;
|
||||
|
||||
struct h264_encoder_v4l2m2m_dst_buf dst_bufs[N_DST_BUFS];
|
||||
};
|
||||
|
||||
struct h264_encoder_impl h264_encoder_v4l2m2m_impl;
|
||||
|
||||
static int v4l2_qbuf(int fd, const struct v4l2_buffer* inbuf)
|
||||
{
|
||||
assert(inbuf->length <= 4);
|
||||
struct v4l2_plane planes[4];
|
||||
struct v4l2_buffer outbuf;
|
||||
outbuf = *inbuf;
|
||||
memcpy(&planes, inbuf->m.planes, inbuf->length * sizeof(planes[0]));
|
||||
outbuf.m.planes = planes;
|
||||
return ioctl(fd, VIDIOC_QBUF, &outbuf);
|
||||
}
|
||||
|
||||
static inline int v4l2_dqbuf(int fd, struct v4l2_buffer* buf)
|
||||
{
|
||||
return ioctl(fd, VIDIOC_DQBUF, buf);
|
||||
}
|
||||
|
||||
static struct h264_encoder_v4l2m2m_src_buf* take_src_buffer(
|
||||
struct h264_encoder_v4l2m2m* self)
|
||||
{
|
||||
unsigned int count = 0;
|
||||
int i = self->src_buf_index;
|
||||
|
||||
struct h264_encoder_v4l2m2m_src_buf* buffer;
|
||||
do {
|
||||
buffer = &self->src_bufs[i++];
|
||||
i %= ARRAY_LENGTH(self->src_bufs);
|
||||
} while (++count < ARRAY_LENGTH(self->src_bufs) && buffer->is_taken);
|
||||
|
||||
if (buffer->is_taken)
|
||||
return NULL;
|
||||
|
||||
self->src_buf_index = i;
|
||||
buffer->is_taken = true;
|
||||
|
||||
return buffer;
|
||||
}
|
||||
|
||||
static bool any_src_buf_is_taken(struct h264_encoder_v4l2m2m* self)
|
||||
{
|
||||
bool result = false;
|
||||
for (unsigned int i = 0; i < ARRAY_LENGTH(self->src_bufs); ++i)
|
||||
if (self->src_bufs[i].is_taken)
|
||||
result = true;
|
||||
return result;
|
||||
}
|
||||
|
||||
static int u32_cmp(const void* pa, const void* pb)
|
||||
{
|
||||
const uint32_t *a = pa;
|
||||
const uint32_t *b = pb;
|
||||
return *a < *b ? -1 : *a > *b;
|
||||
}
|
||||
|
||||
static size_t get_supported_formats(struct h264_encoder_v4l2m2m* self,
|
||||
uint32_t* formats, size_t max_len)
|
||||
{
|
||||
size_t i = 0;
|
||||
for (; i < max_len; ++i) {
|
||||
struct v4l2_fmtdesc desc = {
|
||||
.index = i,
|
||||
.type = V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE,
|
||||
};
|
||||
int rc = ioctl(self->fd, VIDIOC_ENUM_FMT, &desc);
|
||||
if (rc < 0)
|
||||
break;
|
||||
|
||||
nvnc_trace("Got pixel format: %s", desc.description);
|
||||
|
||||
formats[i] = desc.pixelformat;
|
||||
}
|
||||
|
||||
qsort(formats, i, sizeof(*formats), u32_cmp);
|
||||
|
||||
return i;
|
||||
}
|
||||
|
||||
static bool have_v4l2_format(const uint32_t* formats, size_t n_formats,
|
||||
uint32_t format)
|
||||
{
|
||||
return bsearch(&format, formats, n_formats, sizeof(format), u32_cmp);
|
||||
}
|
||||
|
||||
static uint32_t v4l2_format_from_drm(const uint32_t* formats,
|
||||
size_t n_formats, uint32_t drm_format)
|
||||
{
|
||||
#define TRY_FORMAT(f) \
|
||||
if (have_v4l2_format(formats, n_formats, f)) \
|
||||
return f
|
||||
|
||||
switch (drm_format) {
|
||||
case DRM_FORMAT_RGBX8888:
|
||||
case DRM_FORMAT_RGBA8888:
|
||||
TRY_FORMAT(V4L2_PIX_FMT_RGBX32);
|
||||
TRY_FORMAT(V4L2_PIX_FMT_RGBA32);
|
||||
break;
|
||||
case DRM_FORMAT_XRGB8888:
|
||||
case DRM_FORMAT_ARGB8888:
|
||||
TRY_FORMAT(V4L2_PIX_FMT_XRGB32);
|
||||
TRY_FORMAT(V4L2_PIX_FMT_ARGB32);
|
||||
TRY_FORMAT(V4L2_PIX_FMT_RGB32);
|
||||
break;
|
||||
case DRM_FORMAT_BGRX8888:
|
||||
case DRM_FORMAT_BGRA8888:
|
||||
TRY_FORMAT(V4L2_PIX_FMT_XBGR32);
|
||||
TRY_FORMAT(V4L2_PIX_FMT_ABGR32);
|
||||
TRY_FORMAT(V4L2_PIX_FMT_BGR32);
|
||||
break;
|
||||
case DRM_FORMAT_XBGR8888:
|
||||
case DRM_FORMAT_ABGR8888:
|
||||
TRY_FORMAT(V4L2_PIX_FMT_BGRX32);
|
||||
TRY_FORMAT(V4L2_PIX_FMT_BGRA32);
|
||||
break;
|
||||
// TODO: More formats
|
||||
}
|
||||
|
||||
return 0;
|
||||
#undef TRY_FORMAT
|
||||
}
|
||||
|
||||
// This driver mixes up pixel formats...
|
||||
static uint32_t v4l2_format_from_drm_bcm2835(const uint32_t* formats,
|
||||
size_t n_formats, uint32_t drm_format)
|
||||
{
|
||||
switch (drm_format) {
|
||||
case DRM_FORMAT_XRGB8888:
|
||||
case DRM_FORMAT_ARGB8888:
|
||||
return V4L2_PIX_FMT_RGBA32;
|
||||
case DRM_FORMAT_BGRX8888:
|
||||
case DRM_FORMAT_BGRA8888:
|
||||
// TODO: This could also be ABGR, based on how this driver
|
||||
// behaves
|
||||
return V4L2_PIX_FMT_BGR32;
|
||||
}
|
||||
return 0;
|
||||
}
|
||||
|
||||
static int set_src_fmt(struct h264_encoder_v4l2m2m* self)
|
||||
{
|
||||
int rc;
|
||||
|
||||
uint32_t supported_formats[256];
|
||||
size_t n_formats = get_supported_formats(self, supported_formats,
|
||||
ARRAY_LENGTH(supported_formats));
|
||||
|
||||
uint32_t format;
|
||||
if (strcmp(self->driver, "bcm2835-codec") == 0)
|
||||
format = v4l2_format_from_drm_bcm2835(supported_formats,
|
||||
n_formats, self->format);
|
||||
else
|
||||
format = v4l2_format_from_drm(supported_formats, n_formats,
|
||||
self->format);
|
||||
if (!format) {
|
||||
nvnc_log(NVNC_LOG_DEBUG, "Failed to find a proper pixel format");
|
||||
return -1;
|
||||
}
|
||||
|
||||
struct v4l2_format fmt = {
|
||||
.type = V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE,
|
||||
};
|
||||
rc = ioctl(self->fd, VIDIOC_G_FMT, &fmt);
|
||||
if (rc < 0) {
|
||||
return -1;
|
||||
}
|
||||
|
||||
struct v4l2_pix_format_mplane* pix_fmt = &fmt.fmt.pix_mp;
|
||||
pix_fmt->pixelformat = format;
|
||||
pix_fmt->width = ALIGN_UP(self->width, 16);
|
||||
pix_fmt->height = ALIGN_UP(self->height, 16);
|
||||
|
||||
rc = ioctl(self->fd, VIDIOC_S_FMT, &fmt);
|
||||
if (rc < 0) {
|
||||
return -1;
|
||||
}
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
static int set_dst_fmt(struct h264_encoder_v4l2m2m* self)
|
||||
{
|
||||
int rc;
|
||||
|
||||
struct v4l2_format fmt = {
|
||||
.type = V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE,
|
||||
};
|
||||
rc = ioctl(self->fd, VIDIOC_G_FMT, &fmt);
|
||||
if (rc < 0) {
|
||||
return -1;
|
||||
}
|
||||
|
||||
struct v4l2_pix_format_mplane* pix_fmt = &fmt.fmt.pix_mp;
|
||||
pix_fmt->pixelformat = V4L2_PIX_FMT_H264;
|
||||
pix_fmt->width = self->width;
|
||||
pix_fmt->height = self->height;
|
||||
|
||||
rc = ioctl(self->fd, VIDIOC_S_FMT, &fmt);
|
||||
if (rc < 0) {
|
||||
return -1;
|
||||
}
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
static int alloc_dst_buffers(struct h264_encoder_v4l2m2m* self)
|
||||
{
|
||||
int n_bufs = ARRAY_LENGTH(self->dst_bufs);
|
||||
int rc;
|
||||
|
||||
struct v4l2_requestbuffers req = {
|
||||
.memory = V4L2_MEMORY_MMAP,
|
||||
.count = n_bufs,
|
||||
.type = V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE,
|
||||
};
|
||||
rc = ioctl(self->fd, VIDIOC_REQBUFS, &req);
|
||||
if (rc < 0)
|
||||
return -1;
|
||||
|
||||
for (unsigned int i = 0; i < req.count; ++i) {
|
||||
struct h264_encoder_v4l2m2m_dst_buf* buffer = &self->dst_bufs[i];
|
||||
struct v4l2_buffer* buf = &buffer->buffer;
|
||||
|
||||
buf->index = i;
|
||||
buf->type = V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE;
|
||||
buf->memory = V4L2_MEMORY_MMAP;
|
||||
buf->length = 1;
|
||||
buf->m.planes = &buffer->plane;
|
||||
|
||||
rc = ioctl(self->fd, VIDIOC_QUERYBUF, buf);
|
||||
if (rc < 0)
|
||||
return -1;
|
||||
|
||||
buffer->payload = mmap(0, buffer->plane.length,
|
||||
PROT_READ | PROT_WRITE, MAP_SHARED, self->fd,
|
||||
buffer->plane.m.mem_offset);
|
||||
if (buffer->payload == MAP_FAILED) {
|
||||
nvnc_log(NVNC_LOG_ERROR, "Whoops, mapping failed: %m");
|
||||
return -1;
|
||||
}
|
||||
}
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
static void enqueue_dst_buffers(struct h264_encoder_v4l2m2m* self)
|
||||
{
|
||||
for (unsigned int i = 0; i < ARRAY_LENGTH(self->dst_bufs); ++i) {
|
||||
int rc = v4l2_qbuf(self->fd, &self->dst_bufs[i].buffer);
|
||||
assert(rc >= 0);
|
||||
}
|
||||
}
|
||||
|
||||
static void process_dst_bufs(struct h264_encoder_v4l2m2m* self)
|
||||
{
|
||||
int rc;
|
||||
struct v4l2_plane plane = { 0 };
|
||||
struct v4l2_buffer buf = {
|
||||
.type = V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE,
|
||||
.memory = V4L2_MEMORY_MMAP,
|
||||
.length = 1,
|
||||
.m.planes = &plane,
|
||||
};
|
||||
|
||||
while (true) {
|
||||
rc = v4l2_dqbuf(self->fd, &buf);
|
||||
if (rc < 0)
|
||||
break;
|
||||
|
||||
uint64_t pts = buf.timestamp.tv_sec * UINT64_C(1000000) +
|
||||
buf.timestamp.tv_usec;
|
||||
struct h264_encoder_v4l2m2m_dst_buf* dstbuf =
|
||||
&self->dst_bufs[buf.index];
|
||||
size_t size = buf.m.planes[0].bytesused;
|
||||
|
||||
static uint64_t last_pts;
|
||||
if (last_pts && last_pts > pts) {
|
||||
nvnc_log(NVNC_LOG_ERROR, "pts - last_pts = %"PRIi64,
|
||||
(int64_t)pts - (int64_t)last_pts);
|
||||
}
|
||||
last_pts = pts;
|
||||
|
||||
nvnc_trace("Encoded frame (index %d) at %"PRIu64" µs with size: %zu",
|
||||
buf.index, pts, size);
|
||||
|
||||
self->base.on_packet_ready(dstbuf->payload, size, pts,
|
||||
self->base.userdata);
|
||||
|
||||
v4l2_qbuf(self->fd, &buf);
|
||||
}
|
||||
}
|
||||
|
||||
static void process_src_bufs(struct h264_encoder_v4l2m2m* self)
|
||||
{
|
||||
int rc;
|
||||
struct v4l2_plane planes[4] = { 0 };
|
||||
struct v4l2_buffer buf = {
|
||||
.type = V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE,
|
||||
.memory = V4L2_MEMORY_DMABUF,
|
||||
.length = 1,
|
||||
.m.planes = planes,
|
||||
};
|
||||
|
||||
while (true) {
|
||||
rc = v4l2_dqbuf(self->fd, &buf);
|
||||
if (rc < 0)
|
||||
break;
|
||||
|
||||
struct h264_encoder_v4l2m2m_src_buf* srcbuf =
|
||||
&self->src_bufs[buf.index];
|
||||
srcbuf->is_taken = false;
|
||||
|
||||
// TODO: This assumes that there's only one fd
|
||||
close(srcbuf->planes[0].m.fd);
|
||||
|
||||
nvnc_fb_unmap(srcbuf->fb);
|
||||
nvnc_fb_release(srcbuf->fb);
|
||||
nvnc_fb_unref(srcbuf->fb);
|
||||
srcbuf->fb = NULL;
|
||||
}
|
||||
}
|
||||
|
||||
static void stream_off(struct h264_encoder_v4l2m2m* self)
|
||||
{
|
||||
int type = V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE;
|
||||
ioctl(self->fd, VIDIOC_STREAMOFF, &type);
|
||||
|
||||
type = V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE;
|
||||
ioctl(self->fd, VIDIOC_STREAMOFF, &type);
|
||||
}
|
||||
|
||||
static void free_dst_buffers(struct h264_encoder_v4l2m2m* self)
|
||||
{
|
||||
for (unsigned int i = 0; i < ARRAY_LENGTH(self->dst_bufs); ++i) {
|
||||
struct h264_encoder_v4l2m2m_dst_buf* buf = &self->dst_bufs[i];
|
||||
munmap(buf->payload, buf->plane.length);
|
||||
}
|
||||
}
|
||||
|
||||
static int stream_on(struct h264_encoder_v4l2m2m* self)
|
||||
{
|
||||
int type = V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE;
|
||||
ioctl(self->fd, VIDIOC_STREAMON, &type);
|
||||
|
||||
type = V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE;
|
||||
return ioctl(self->fd, VIDIOC_STREAMON, &type);
|
||||
}
|
||||
|
||||
static int alloc_src_buffers(struct h264_encoder_v4l2m2m* self)
|
||||
{
|
||||
int rc;
|
||||
|
||||
struct v4l2_requestbuffers req = {
|
||||
.memory = V4L2_MEMORY_DMABUF,
|
||||
.count = N_SRC_BUFS,
|
||||
.type = V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE,
|
||||
};
|
||||
rc = ioctl(self->fd, VIDIOC_REQBUFS, &req);
|
||||
if (rc < 0)
|
||||
return -1;
|
||||
|
||||
for (int i = 0; i < N_SRC_BUFS; ++i) {
|
||||
struct h264_encoder_v4l2m2m_src_buf* buffer = &self->src_bufs[i];
|
||||
struct v4l2_buffer* buf = &buffer->buffer;
|
||||
|
||||
buf->index = i;
|
||||
buf->type = V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE;
|
||||
buf->memory = V4L2_MEMORY_DMABUF;
|
||||
buf->length = 1;
|
||||
buf->m.planes = buffer->planes;
|
||||
|
||||
rc = ioctl(self->fd, VIDIOC_QUERYBUF, buf);
|
||||
if (rc < 0)
|
||||
return -1;
|
||||
}
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
static void force_key_frame(struct h264_encoder_v4l2m2m* self)
|
||||
{
|
||||
struct v4l2_control ctrl = { 0 };
|
||||
ctrl.id = V4L2_CID_MPEG_VIDEO_FORCE_KEY_FRAME;
|
||||
ctrl.value = 0;
|
||||
ioctl(self->fd, VIDIOC_S_CTRL, &ctrl);
|
||||
}
|
||||
|
||||
static void encode_buffer(struct h264_encoder_v4l2m2m* self,
|
||||
struct nvnc_fb* fb)
|
||||
{
|
||||
struct h264_encoder_v4l2m2m_src_buf* srcbuf = take_src_buffer(self);
|
||||
if (!srcbuf) {
|
||||
nvnc_log(NVNC_LOG_ERROR, "Out of source buffers. Dropping frame...");
|
||||
return;
|
||||
}
|
||||
|
||||
assert(!srcbuf->fb);
|
||||
|
||||
nvnc_fb_ref(fb);
|
||||
nvnc_fb_hold(fb);
|
||||
|
||||
/* For some reason the v4l2m2m h264 encoder in the Raspberry Pi 4 gets
|
||||
* really glitchy unless the buffer is mapped first.
|
||||
* This should probably be handled by the driver, but it's not.
|
||||
*/
|
||||
nvnc_fb_map(fb);
|
||||
|
||||
srcbuf->fb = fb;
|
||||
|
||||
struct gbm_bo* bo = nvnc_fb_get_gbm_bo(fb);
|
||||
|
||||
int n_planes = gbm_bo_get_plane_count(bo);
|
||||
int fd = gbm_bo_get_fd(bo);
|
||||
uint32_t height = ALIGN_UP(gbm_bo_get_height(bo), 16);
|
||||
|
||||
for (int i = 0; i < n_planes; ++i) {
|
||||
uint32_t stride = gbm_bo_get_stride_for_plane(bo, i);
|
||||
uint32_t offset = gbm_bo_get_offset(bo, i);
|
||||
uint32_t size = stride * height;
|
||||
|
||||
srcbuf->buffer.m.planes[i].m.fd = fd;
|
||||
srcbuf->buffer.m.planes[i].bytesused = size;
|
||||
srcbuf->buffer.m.planes[i].length = size;
|
||||
srcbuf->buffer.m.planes[i].data_offset = offset;
|
||||
}
|
||||
|
||||
srcbuf->buffer.timestamp.tv_sec = fb->pts / UINT64_C(1000000);
|
||||
srcbuf->buffer.timestamp.tv_usec = fb->pts % UINT64_C(1000000);
|
||||
|
||||
if (self->base.next_frame_should_be_keyframe)
|
||||
force_key_frame(self);
|
||||
self->base.next_frame_should_be_keyframe = false;
|
||||
|
||||
int rc = v4l2_qbuf(self->fd, &srcbuf->buffer);
|
||||
if (rc < 0) {
|
||||
nvnc_log(NVNC_LOG_PANIC, "Failed to enqueue buffer: %m");
|
||||
}
|
||||
}
|
||||
|
||||
static void process_fd_events(void* handle)
|
||||
{
|
||||
struct h264_encoder_v4l2m2m* self = aml_get_userdata(handle);
|
||||
process_dst_bufs(self);
|
||||
}
|
||||
|
||||
static void h264_encoder_v4l2m2m_configure(struct h264_encoder_v4l2m2m* self)
|
||||
{
|
||||
struct v4l2_control ctrl = { 0 };
|
||||
|
||||
ctrl.id = V4L2_CID_MPEG_VIDEO_H264_PROFILE;
|
||||
ctrl.value = V4L2_MPEG_VIDEO_H264_PROFILE_CONSTRAINED_BASELINE;
|
||||
ioctl(self->fd, VIDIOC_S_CTRL, &ctrl);
|
||||
|
||||
ctrl.id = V4L2_CID_MPEG_VIDEO_H264_I_PERIOD;
|
||||
ctrl.value = INT_MAX;
|
||||
ioctl(self->fd, VIDIOC_S_CTRL, &ctrl);
|
||||
|
||||
ctrl.id = V4L2_CID_MPEG_VIDEO_BITRATE_MODE;
|
||||
ctrl.value = V4L2_MPEG_VIDEO_BITRATE_MODE_CQ;
|
||||
ioctl(self->fd, VIDIOC_S_CTRL, &ctrl);
|
||||
|
||||
ctrl.id = V4L2_CID_MPEG_VIDEO_CONSTANT_QUALITY;
|
||||
ctrl.value = self->quality;
|
||||
ioctl(self->fd, VIDIOC_S_CTRL, &ctrl);
|
||||
}
|
||||
|
||||
static bool can_encode_to_h264(int fd)
|
||||
{
|
||||
size_t i = 0;
|
||||
for (;; ++i) {
|
||||
struct v4l2_fmtdesc desc = {
|
||||
.index = i,
|
||||
.type = V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE,
|
||||
};
|
||||
int rc = ioctl(fd, VIDIOC_ENUM_FMT, &desc);
|
||||
if (rc < 0)
|
||||
break;
|
||||
|
||||
if (desc.pixelformat == V4L2_PIX_FMT_H264)
|
||||
return true;
|
||||
}
|
||||
return false;
|
||||
}
|
||||
|
||||
static bool can_handle_frame_size(int fd, uint32_t width, uint32_t height)
|
||||
{
|
||||
size_t i = 0;
|
||||
for (;; ++i) {
|
||||
struct v4l2_frmsizeenum size = {
|
||||
.index = i,
|
||||
.pixel_format = V4L2_PIX_FMT_H264,
|
||||
};
|
||||
int rc = ioctl(fd, VIDIOC_ENUM_FRAMESIZES, &size);
|
||||
if (rc < 0)
|
||||
break;
|
||||
|
||||
switch (size.type) {
|
||||
case V4L2_FRMSIZE_TYPE_DISCRETE:
|
||||
if (size.discrete.width == width &&
|
||||
size.discrete.height == height)
|
||||
return true;
|
||||
break;
|
||||
case V4L2_FRMSIZE_TYPE_CONTINUOUS:
|
||||
case V4L2_FRMSIZE_TYPE_STEPWISE:
|
||||
if (size.stepwise.min_width <= width &&
|
||||
width <= size.stepwise.max_width &&
|
||||
size.stepwise.min_height <= height &&
|
||||
height <= size.stepwise.max_height &&
|
||||
(16 % size.stepwise.step_width) == 0 &&
|
||||
(16 % size.stepwise.step_height) == 0)
|
||||
return true;
|
||||
break;
|
||||
}
|
||||
}
|
||||
return false;
|
||||
}
|
||||
|
||||
static bool is_device_capable(int fd, uint32_t width, uint32_t height)
|
||||
{
|
||||
struct v4l2_capability cap = { 0 };
|
||||
int rc = ioctl(fd, VIDIOC_QUERYCAP, &cap);
|
||||
if (rc < 0)
|
||||
return false;
|
||||
|
||||
uint32_t required_caps = V4L2_CAP_VIDEO_M2M_MPLANE | V4L2_CAP_STREAMING;
|
||||
if ((cap.capabilities & required_caps) != required_caps)
|
||||
return false;
|
||||
|
||||
if (!can_encode_to_h264(fd))
|
||||
return false;
|
||||
|
||||
if (!can_handle_frame_size(fd, width, height))
|
||||
return false;
|
||||
|
||||
return true;
|
||||
}
|
||||
|
||||
static int find_capable_device(uint32_t width, uint32_t height)
|
||||
{
|
||||
int fd = -1;
|
||||
DIR *dir = opendir("/dev");
|
||||
assert(dir);
|
||||
|
||||
for (;;) {
|
||||
struct dirent* entry = readdir(dir);
|
||||
if (!entry)
|
||||
break;
|
||||
|
||||
if (strncmp(entry->d_name, "video", 5) != 0)
|
||||
continue;
|
||||
|
||||
char path[256];
|
||||
snprintf(path, sizeof(path), "/dev/%s", entry->d_name);
|
||||
fd = open(path, O_RDWR | O_CLOEXEC);
|
||||
if (fd < 0) {
|
||||
continue;
|
||||
}
|
||||
|
||||
if (is_device_capable(fd, width, height)) {
|
||||
nvnc_log(NVNC_LOG_DEBUG, "Using v4l2m2m device: %s",
|
||||
path);
|
||||
break;
|
||||
}
|
||||
close(fd);
|
||||
fd = -1;
|
||||
}
|
||||
|
||||
closedir(dir);
|
||||
return fd;
|
||||
}
|
||||
|
||||
static struct h264_encoder* h264_encoder_v4l2m2m_create(uint32_t width,
|
||||
uint32_t height, uint32_t format, int quality)
|
||||
{
|
||||
struct h264_encoder_v4l2m2m* self = calloc(1, sizeof(*self));
|
||||
if (!self)
|
||||
return NULL;
|
||||
|
||||
self->base.impl = &h264_encoder_v4l2m2m_impl;
|
||||
self->fd = -1;
|
||||
self->width = width;
|
||||
self->height = height;
|
||||
self->format = format;
|
||||
self->quality = quality;
|
||||
|
||||
self->fd = find_capable_device(width, height);
|
||||
if (self->fd < 0)
|
||||
goto failure;
|
||||
|
||||
struct v4l2_capability cap = { 0 };
|
||||
ioctl(self->fd, VIDIOC_QUERYCAP, &cap);
|
||||
strncpy(self->driver, (const char*)cap.driver, sizeof(self->driver));
|
||||
|
||||
if (set_src_fmt(self) < 0)
|
||||
goto failure;
|
||||
|
||||
if (set_dst_fmt(self) < 0)
|
||||
goto failure;
|
||||
|
||||
h264_encoder_v4l2m2m_configure(self);
|
||||
|
||||
if (alloc_dst_buffers(self) < 0)
|
||||
goto failure;
|
||||
|
||||
if (alloc_src_buffers(self) < 0)
|
||||
goto failure;
|
||||
|
||||
enqueue_dst_buffers(self);
|
||||
|
||||
if (stream_on(self) < 0)
|
||||
goto failure;
|
||||
|
||||
int flags = fcntl(self->fd, F_GETFL);
|
||||
fcntl(self->fd, F_SETFL, flags | O_NONBLOCK);
|
||||
|
||||
self->handler = aml_handler_new(self->fd, process_fd_events, self, NULL);
|
||||
aml_set_event_mask(self->handler, AML_EVENT_READ);
|
||||
|
||||
if (aml_start(aml_get_default(), self->handler) < 0) {
|
||||
aml_unref(self->handler);
|
||||
goto failure;
|
||||
}
|
||||
|
||||
return &self->base;
|
||||
|
||||
failure:
|
||||
if (self->fd >= 0)
|
||||
close(self->fd);
|
||||
return NULL;
|
||||
}
|
||||
|
||||
static void claim_all_src_bufs(
|
||||
struct h264_encoder_v4l2m2m* self)
|
||||
{
|
||||
for (;;) {
|
||||
process_src_bufs(self);
|
||||
if (!any_src_buf_is_taken(self))
|
||||
break;
|
||||
usleep(10000);
|
||||
}
|
||||
}
|
||||
|
||||
static void h264_encoder_v4l2m2m_destroy(struct h264_encoder* base)
|
||||
{
|
||||
struct h264_encoder_v4l2m2m* self = (struct h264_encoder_v4l2m2m*)base;
|
||||
claim_all_src_bufs(self);
|
||||
aml_stop(aml_get_default(), self->handler);
|
||||
aml_unref(self->handler);
|
||||
stream_off(self);
|
||||
free_dst_buffers(self);
|
||||
if (self->fd >= 0)
|
||||
close(self->fd);
|
||||
free(self);
|
||||
}
|
||||
|
||||
static void h264_encoder_v4l2m2m_feed(struct h264_encoder* base,
|
||||
struct nvnc_fb* fb)
|
||||
{
|
||||
struct h264_encoder_v4l2m2m* self = (struct h264_encoder_v4l2m2m*)base;
|
||||
process_src_bufs(self);
|
||||
encode_buffer(self, fb);
|
||||
}
|
||||
|
||||
struct h264_encoder_impl h264_encoder_v4l2m2m_impl = {
|
||||
.create = h264_encoder_v4l2m2m_create,
|
||||
.destroy = h264_encoder_v4l2m2m_destroy,
|
||||
.feed = h264_encoder_v4l2m2m_feed,
|
||||
};
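The v4l2m2m backend carries each frame's microsecond PTS through the kernel by splitting it into the buffer's `struct timeval` on the way in (`encode_buffer`) and recombining it on the way out (`process_dst_bufs`). A small self-contained sketch of that round trip, with hypothetical helper names:

```c
#include <stdint.h>
#include <sys/time.h>

/* Split a microsecond PTS into the timeval carried by v4l2_buffer, as
 * encode_buffer() does. */
static struct timeval pts_to_timeval(uint64_t pts_us)
{
	struct timeval tv;
	tv.tv_sec = pts_us / UINT64_C(1000000);
	tv.tv_usec = pts_us % UINT64_C(1000000);
	return tv;
}

/* Recombine it on dequeue, as process_dst_bufs() does. */
static uint64_t timeval_to_pts(const struct timeval* tv)
{
	return tv->tv_sec * UINT64_C(1000000) + tv->tv_usec;
}
```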
@ -1,5 +1,5 @@
|
|||
/*
|
||||
* Copyright (c) 2021 - 2022 Andri Yngvason
|
||||
* Copyright (c) 2024 Andri Yngvason
|
||||
*
|
||||
* Permission to use, copy, modify, and/or distribute this software for any
|
||||
* purpose with or without fee is hereby granted, provided that the above
|
||||
|
@ -15,614 +15,60 @@
|
|||
*/
|
||||
|
||||
#include "h264-encoder.h"
|
||||
#include "neatvnc.h"
|
||||
#include "fb.h"
|
||||
#include "sys/queue.h"
|
||||
#include "vec.h"
|
||||
#include "usdt.h"
|
||||
#include "config.h"
|
||||
|
||||
#include <stdlib.h>
|
||||
#include <stdint.h>
|
||||
#include <stdbool.h>
|
||||
#include <unistd.h>
|
||||
#include <assert.h>
|
||||
#include <gbm.h>
|
||||
#include <xf86drm.h>
|
||||
#include <aml.h>
|
||||
|
||||
#include <libavcodec/avcodec.h>
|
||||
#include <libavutil/hwcontext.h>
|
||||
#include <libavutil/hwcontext_drm.h>
|
||||
#include <libavutil/pixdesc.h>
|
||||
#include <libavutil/dict.h>
|
||||
#include <libavfilter/avfilter.h>
|
||||
#include <libavfilter/buffersink.h>
|
||||
#include <libavfilter/buffersrc.h>
|
||||
|
||||
#include <libdrm/drm_fourcc.h>
|
||||
|
||||
struct h264_encoder;
|
||||
|
||||
struct fb_queue_entry {
|
||||
struct nvnc_fb* fb;
|
||||
TAILQ_ENTRY(fb_queue_entry) link;
|
||||
};
|
||||
|
||||
TAILQ_HEAD(fb_queue, fb_queue_entry);
|
||||
|
||||
struct h264_encoder {
|
||||
h264_encoder_packet_handler_fn on_packet_ready;
|
||||
void* userdata;
|
||||
|
||||
uint32_t width;
|
||||
uint32_t height;
|
||||
uint32_t format;
|
||||
|
||||
AVRational timebase;
|
||||
AVRational sample_aspect_ratio;
|
||||
enum AVPixelFormat av_pixel_format;
|
||||
|
||||
/* type: AVHWDeviceContext */
|
||||
AVBufferRef* hw_device_ctx;
|
||||
|
||||
/* type: AVHWFramesContext */
|
||||
AVBufferRef* hw_frames_ctx;
|
||||
|
||||
AVCodecContext* codec_ctx;
|
||||
|
||||
AVFilterGraph* filter_graph;
|
||||
AVFilterContext* filter_in;
|
||||
AVFilterContext* filter_out;
|
||||
|
||||
bool next_frame_should_be_keyframe;
|
||||
struct fb_queue fb_queue;
|
||||
|
||||
struct aml_work* work;
|
||||
struct nvnc_fb* current_fb;
|
||||
struct vec current_packet;
|
||||
bool current_frame_is_keyframe;
|
||||
|
||||
bool please_destroy;
|
||||
};
|
||||
|
||||
static enum AVPixelFormat drm_to_av_pixel_format(uint32_t format)
|
||||
{
|
||||
switch (format) {
|
||||
case DRM_FORMAT_XRGB8888:
|
||||
case DRM_FORMAT_ARGB8888:
|
||||
return AV_PIX_FMT_BGR0;
|
||||
case DRM_FORMAT_XBGR8888:
|
||||
case DRM_FORMAT_ABGR8888:
|
||||
return AV_PIX_FMT_RGB0;
|
||||
case DRM_FORMAT_RGBX8888:
|
||||
case DRM_FORMAT_RGBA8888:
|
||||
return AV_PIX_FMT_0BGR;
|
||||
case DRM_FORMAT_BGRX8888:
|
||||
case DRM_FORMAT_BGRA8888:
|
||||
return AV_PIX_FMT_0RGB;
|
||||
}
|
||||
|
||||
return AV_PIX_FMT_NONE;
|
||||
}
|
||||
|
||||
static void hw_frame_desc_free(void* opaque, uint8_t* data)
|
||||
{
|
||||
struct AVDRMFrameDescriptor* desc = (void*)data;
|
||||
assert(desc);
|
||||
|
||||
for (int i = 0; i < desc->nb_objects; ++i)
|
||||
close(desc->objects[i].fd);
|
||||
|
||||
free(desc);
|
||||
}
|
||||
|
||||
// TODO: Maybe do this once per frame inside nvnc_fb?
|
||||
static AVFrame* fb_to_avframe(struct nvnc_fb* fb)
|
||||
{
|
||||
struct gbm_bo* bo = fb->bo;
|
||||
|
||||
int n_planes = gbm_bo_get_plane_count(bo);
|
||||
|
||||
AVDRMFrameDescriptor* desc = calloc(1, sizeof(*desc));
|
||||
desc->nb_objects = n_planes;
|
||||
|
||||
desc->nb_layers = 1;
|
||||
desc->layers[0].format = gbm_bo_get_format(bo);
|
||||
desc->layers[0].nb_planes = n_planes;
|
||||
|
||||
for (int i = 0; i < n_planes; ++i) {
|
||||
uint32_t stride = gbm_bo_get_stride_for_plane(bo, i);
|
||||
|
||||
desc->objects[i].fd = gbm_bo_get_fd_for_plane(bo, i);
|
||||
desc->objects[i].size = stride * fb->height;
|
||||
desc->objects[i].format_modifier = gbm_bo_get_modifier(bo);
|
||||
|
||||
desc->layers[0].format = gbm_bo_get_format(bo);
|
||||
desc->layers[0].planes[i].object_index = i;
|
||||
desc->layers[0].planes[i].offset = gbm_bo_get_offset(bo, i);
|
||||
desc->layers[0].planes[i].pitch = stride;
|
||||
}
|
||||
|
||||
AVFrame* frame = av_frame_alloc();
|
||||
if (!frame) {
|
||||
hw_frame_desc_free(NULL, (void*)desc);
|
||||
return NULL;
|
||||
}
|
||||
|
||||
frame->opaque = fb;
|
||||
frame->width = fb->width;
|
||||
frame->height = fb->height;
|
||||
frame->format = AV_PIX_FMT_DRM_PRIME;
|
||||
frame->sample_aspect_ratio = (AVRational){1, 1};
|
||||
|
||||
AVBufferRef* desc_ref = av_buffer_create((void*)desc, sizeof(*desc),
|
||||
hw_frame_desc_free, NULL, 0);
|
||||
if (!desc_ref) {
|
||||
hw_frame_desc_free(NULL, (void*)desc);
|
||||
av_frame_free(&frame);
|
||||
return NULL;
|
||||
}
|
||||
|
||||
frame->buf[0] = desc_ref;
|
||||
frame->data[0] = (void*)desc_ref->data;
|
||||
|
||||
// TODO: Set colorspace?
|
||||
|
||||
return frame;
|
||||
}
|
||||
|
||||
static struct nvnc_fb* fb_queue_dequeue(struct fb_queue* queue)
|
||||
{
|
||||
if (TAILQ_EMPTY(queue))
|
||||
return NULL;
|
||||
|
||||
struct fb_queue_entry* entry = TAILQ_FIRST(queue);
|
||||
TAILQ_REMOVE(queue, entry, link);
|
||||
struct nvnc_fb* fb = entry->fb;
|
||||
free(entry);
|
||||
|
||||
return fb;
|
||||
}
|
||||
|
||||
static int fb_queue_enqueue(struct fb_queue* queue, struct nvnc_fb* fb)
|
||||
{
|
||||
struct fb_queue_entry* entry = calloc(1, sizeof(*entry));
|
||||
if (!entry)
|
||||
return -1;
|
||||
|
||||
entry->fb = fb;
|
||||
nvnc_fb_ref(fb);
|
||||
TAILQ_INSERT_TAIL(queue, entry, link);
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
static int h264_encoder__init_buffersrc(struct h264_encoder* self)
|
||||
{
|
||||
int rc;
|
||||
|
||||
/* Placeholder values are used to pacify input checking and the real
|
||||
* values are set below.
|
||||
*/
|
||||
rc = avfilter_graph_create_filter(&self->filter_in,
|
||||
avfilter_get_by_name("buffer"), "in",
|
||||
"width=1:height=1:pix_fmt=drm_prime:time_base=1/1", NULL,
|
||||
self->filter_graph);
|
||||
if (rc != 0)
|
||||
return -1;
|
||||
|
||||
AVBufferSrcParameters *params = av_buffersrc_parameters_alloc();
|
||||
if (!params)
|
||||
return -1;
|
||||
|
||||
params->format = AV_PIX_FMT_DRM_PRIME;
|
||||
params->width = self->width;
|
||||
params->height = self->height;
|
||||
params->sample_aspect_ratio = self->sample_aspect_ratio;
|
||||
params->time_base = self->timebase;
|
||||
params->hw_frames_ctx = self->hw_frames_ctx;
|
||||
|
||||
rc = av_buffersrc_parameters_set(self->filter_in, params);
|
||||
assert(rc == 0);
|
||||
|
||||
av_free(params);
|
||||
return 0;
|
||||
}
|
||||
|
||||
static int h264_encoder__init_filters(struct h264_encoder* self)
|
||||
{
|
||||
int rc;
|
||||
|
||||
self->filter_graph = avfilter_graph_alloc();
|
||||
if (!self->filter_graph)
|
||||
return -1;
|
||||
|
||||
rc = h264_encoder__init_buffersrc(self);
|
||||
if (rc != 0)
|
||||
goto failure;
|
||||
|
||||
rc = avfilter_graph_create_filter(&self->filter_out,
|
||||
avfilter_get_by_name("buffersink"), "out", NULL,
|
||||
NULL, self->filter_graph);
|
||||
if (rc != 0)
|
||||
goto failure;
|
||||
|
||||
AVFilterInOut* inputs = avfilter_inout_alloc();
|
||||
if (!inputs)
|
||||
goto failure;
|
||||
|
||||
inputs->name = av_strdup("in");
|
||||
inputs->filter_ctx = self->filter_in;
|
||||
inputs->pad_idx = 0;
|
||||
inputs->next = NULL;
|
||||
|
||||
AVFilterInOut* outputs = avfilter_inout_alloc();
|
||||
if (!outputs) {
|
||||
avfilter_inout_free(&inputs);
|
||||
goto failure;
|
||||
}
|
||||
|
||||
outputs->name = av_strdup("out");
|
||||
outputs->filter_ctx = self->filter_out;
|
||||
outputs->pad_idx = 0;
|
||||
outputs->next = NULL;
|
||||
|
||||
rc = avfilter_graph_parse(self->filter_graph,
|
||||
"hwmap=mode=direct:derive_device=vaapi"
|
||||
",scale_vaapi=format=nv12:mode=fast",
|
||||
outputs, inputs, NULL);
|
||||
if (rc != 0)
|
||||
goto failure;
|
||||
|
||||
assert(self->hw_device_ctx);
|
||||
|
||||
for (unsigned int i = 0; i < self->filter_graph->nb_filters; ++i) {
|
||||
self->filter_graph->filters[i]->hw_device_ctx =
|
||||
av_buffer_ref(self->hw_device_ctx);
|
||||
}
|
||||
|
||||
rc = avfilter_graph_config(self->filter_graph, NULL);
|
||||
if (rc != 0)
|
||||
goto failure;
|
||||
|
||||
return 0;
|
||||
|
||||
failure:
|
||||
avfilter_graph_free(&self->filter_graph);
|
||||
return -1;
|
||||
}
|
||||
|
||||
static int h264_encoder__init_codec_context(struct h264_encoder* self,
|
||||
const AVCodec* codec, int quality)
|
||||
{
|
||||
self->codec_ctx = avcodec_alloc_context3(codec);
|
||||
if (!self->codec_ctx)
|
||||
return -1;
|
||||
|
||||
struct AVCodecContext* c = self->codec_ctx;
|
||||
c->width = self->width;
|
||||
c->height = self->height;
|
||||
c->time_base = self->timebase;
|
||||
c->sample_aspect_ratio = self->sample_aspect_ratio;
|
||||
c->pix_fmt = AV_PIX_FMT_VAAPI;
|
||||
c->gop_size = INT32_MAX; /* We'll select key frames manually */
|
||||
c->max_b_frames = 0; /* B-frames are bad for latency */
|
||||
c->global_quality = quality;
|
||||
|
||||
/* open-h264 requires baseline profile, so we use constrained
|
||||
* baseline.
|
||||
*/
|
||||
c->profile = 578;
|
||||
|
||||
return 0;
|
||||
}
static int h264_encoder__init_hw_frames_context(struct h264_encoder* self)
{
	self->hw_frames_ctx = av_hwframe_ctx_alloc(self->hw_device_ctx);
	if (!self->hw_frames_ctx)
		return -1;

	AVHWFramesContext* c = (AVHWFramesContext*)self->hw_frames_ctx->data;
	c->format = AV_PIX_FMT_DRM_PRIME;
	c->sw_format = drm_to_av_pixel_format(self->format);
	c->width = self->width;
	c->height = self->height;

	if (av_hwframe_ctx_init(self->hw_frames_ctx) < 0) {
		av_buffer_unref(&self->hw_frames_ctx);
		return -1;
	}

	return 0;
}
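/* Only one frame is encoded at a time: work is scheduled only when no frame
 * is currently in flight, and the next queued frame is picked up again from
 * h264_encoder__on_work_done(). */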
static int h264_encoder__schedule_work(struct h264_encoder* self)
{
	if (self->current_fb)
		return 0;

	self->current_fb = fb_queue_dequeue(&self->fb_queue);
	if (!self->current_fb)
		return 0;

	DTRACE_PROBE1(neatvnc, h264_encode_frame_begin, self->current_fb->pts);

	self->current_frame_is_keyframe = self->next_frame_should_be_keyframe;
	self->next_frame_should_be_keyframe = false;

	return aml_start(aml_get_default(), self->work);
}
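/* Encoding pushes the dmabuf-backed frame through the filter graph, feeds
 * the filtered frame to the encoder and then drains all pending packets,
 * appending their payloads to current_packet. AVERROR(EAGAIN) from the
 * drain loop only means the encoder wants more input, so it is not treated
 * as an error. */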
static int h264_encoder__encode(struct h264_encoder* self, AVFrame* frame_in)
{
	int rc;

	rc = av_buffersrc_add_frame_flags(self->filter_in, frame_in,
			AV_BUFFERSRC_FLAG_KEEP_REF);
	if (rc != 0)
		return -1;

	AVFrame* filtered_frame = av_frame_alloc();
	if (!filtered_frame)
		return -1;

	rc = av_buffersink_get_frame(self->filter_out, filtered_frame);
	if (rc != 0)
		goto get_frame_failure;

	rc = avcodec_send_frame(self->codec_ctx, filtered_frame);
	if (rc != 0)
		goto send_frame_failure;

	AVPacket* packet = av_packet_alloc();
	assert(packet); // TODO

	while (1) {
		rc = avcodec_receive_packet(self->codec_ctx, packet);
		if (rc != 0)
			break;

		vec_append(&self->current_packet, packet->data, packet->size);

		packet->stream_index = 0;
		av_packet_unref(packet);
	}

	// Frame should always start with a zero:
	assert(self->current_packet.len == 0 ||
			((char*)self->current_packet.data)[0] == 0);

	av_packet_free(&packet);
send_frame_failure:
	av_frame_unref(filtered_frame);
get_frame_failure:
	av_frame_free(&filtered_frame);
	return rc == AVERROR(EAGAIN) ? 0 : rc;
}
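/* This runs on aml's worker so that encoding does not block the main loop.
 * Newer libavutil replaced AVFrame::key_frame with the AV_FRAME_FLAG_KEY
 * flag, hence the version guards below. */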
static void h264_encoder__do_work(void* handle)
{
	struct h264_encoder* self = aml_get_userdata(handle);

	AVFrame* frame = fb_to_avframe(self->current_fb);
	assert(frame); // TODO

	frame->hw_frames_ctx = av_buffer_ref(self->hw_frames_ctx);

	if (self->current_frame_is_keyframe) {
#if LIBAVUTIL_VERSION_INT >= AV_VERSION_INT(58, 7, 100)
		frame->flags |= AV_FRAME_FLAG_KEY;
#else
		frame->key_frame = 1;
#endif
		frame->pict_type = AV_PICTURE_TYPE_I;
	} else {
#if LIBAVUTIL_VERSION_INT >= AV_VERSION_INT(58, 7, 100)
		frame->flags &= ~AV_FRAME_FLAG_KEY;
#else
		frame->key_frame = 0;
#endif
		frame->pict_type = AV_PICTURE_TYPE_P;
	}

	int rc = h264_encoder__encode(self, frame);
	if (rc != 0) {
		char err[256];
		av_strerror(rc, err, sizeof(err));
		nvnc_log(NVNC_LOG_ERROR, "Failed to encode packet: %s", err);
		goto failure;
	}

failure:
	av_frame_unref(frame);
	av_frame_free(&frame);
}
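/* Back on the main loop after the worker finishes: the packet is copied
 * before invoking the callback because the callback is allowed to destroy
 * the encoder, and the next queued frame is scheduled before the callback
 * runs. */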
static void h264_encoder__on_work_done(void* handle)
{
	struct h264_encoder* self = aml_get_userdata(handle);

	uint64_t pts = nvnc_fb_get_pts(self->current_fb);
	nvnc_fb_release(self->current_fb);
	nvnc_fb_unref(self->current_fb);
	self->current_fb = NULL;

	DTRACE_PROBE1(neatvnc, h264_encode_frame_end, pts);

	if (self->please_destroy) {
		vec_destroy(&self->current_packet);
		h264_encoder_destroy(self);
		return;
	}

	if (self->current_packet.len == 0) {
		nvnc_log(NVNC_LOG_WARNING, "Whoops, encoded packet length is 0");
		return;
	}

	void* userdata = self->userdata;

	// Must make a copy of packet because the callback might destroy the
	// encoder object.
	struct vec packet;
	vec_init(&packet, self->current_packet.len);
	vec_append(&packet, self->current_packet.data,
			self->current_packet.len);

	vec_clear(&self->current_packet);
	h264_encoder__schedule_work(self);

	self->on_packet_ready(packet.data, packet.len, pts, userdata);
	vec_destroy(&packet);
}
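/* Picks the first available DRM render node (typically something like
 * /dev/dri/renderD128) to back the VA-API device. */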
static int find_render_node(char *node, size_t maxlen) {
	int r = -1;
	drmDevice *devices[64];

	int n = drmGetDevices2(0, devices, sizeof(devices) / sizeof(devices[0]));
	for (int i = 0; i < n; ++i) {
		drmDevice *dev = devices[i];
		if (!(dev->available_nodes & (1 << DRM_NODE_RENDER)))
			continue;

		strncpy(node, dev->nodes[DRM_NODE_RENDER], maxlen);
		node[maxlen - 1] = '\0';
		r = 0;
		break;
	}

	drmFreeDevices(devices, n);
	return r;
}
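/* Two variants of the constructor appear in this comparison: the inline
 * VA-API implementation, and a thin dispatcher that tries the per-backend
 * h264_encoder_impl instances (v4l2m2m, then ffmpeg). The inline variant is
 * shown first, the dispatcher after it. */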
struct h264_encoder* h264_encoder_create(uint32_t width, uint32_t height,
		uint32_t format, int quality)
{
	int rc;

	struct h264_encoder* self = calloc(1, sizeof(*self));
	if (!self)
		return NULL;

	if (vec_init(&self->current_packet, 65536) < 0)
		goto packet_failure;

	self->work = aml_work_new(h264_encoder__do_work,
			h264_encoder__on_work_done, self, NULL);
	if (!self->work)
		goto worker_failure;

	char render_node[64];
	if (find_render_node(render_node, sizeof(render_node)) < 0)
		goto render_node_failure;

	rc = av_hwdevice_ctx_create(&self->hw_device_ctx,
			AV_HWDEVICE_TYPE_DRM, render_node, NULL, 0);
	if (rc != 0)
		goto hwdevice_ctx_failure;

	self->next_frame_should_be_keyframe = true;
	TAILQ_INIT(&self->fb_queue);

	self->width = width;
	self->height = height;
	self->format = format;
	self->timebase = (AVRational){1, 1000000};
	self->sample_aspect_ratio = (AVRational){1, 1};
	self->av_pixel_format = drm_to_av_pixel_format(format);
	if (self->av_pixel_format == AV_PIX_FMT_NONE)
		goto pix_fmt_failure;

	const AVCodec* codec = avcodec_find_encoder_by_name("h264_vaapi");
	if (!codec)
		goto codec_failure;

	if (h264_encoder__init_hw_frames_context(self) < 0)
		goto hw_frames_context_failure;

	if (h264_encoder__init_filters(self) < 0)
		goto filter_failure;

	if (h264_encoder__init_codec_context(self, codec, quality) < 0)
		goto codec_context_failure;

	self->codec_ctx->hw_frames_ctx =
		av_buffer_ref(self->filter_out->inputs[0]->hw_frames_ctx);

	AVDictionary *opts = NULL;
	av_dict_set_int(&opts, "async_depth", 1, 0);

	rc = avcodec_open2(self->codec_ctx, codec, &opts);
	av_dict_free(&opts);

	if (rc != 0)
		goto avcodec_open_failure;

	return self;

avcodec_open_failure:
	avcodec_free_context(&self->codec_ctx);
codec_context_failure:
filter_failure:
	av_buffer_unref(&self->hw_frames_ctx);
hw_frames_context_failure:
codec_failure:
pix_fmt_failure:
	av_buffer_unref(&self->hw_device_ctx);
hwdevice_ctx_failure:
render_node_failure:
	aml_unref(self->work);
worker_failure:
	vec_destroy(&self->current_packet);
packet_failure:
	free(self);
	return NULL;
}

/* Dispatcher variant: */
#ifdef HAVE_V4L2
extern struct h264_encoder_impl h264_encoder_v4l2m2m_impl;
#endif
#ifdef HAVE_FFMPEG
extern struct h264_encoder_impl h264_encoder_ffmpeg_impl;
#endif

struct h264_encoder* h264_encoder_create(uint32_t width, uint32_t height,
		uint32_t format, int quality)
{
	struct h264_encoder* encoder = NULL;

#ifdef HAVE_V4L2
	encoder = h264_encoder_v4l2m2m_impl.create(width, height, format, quality);
	if (encoder) {
		return encoder;
	}
#endif

#ifdef HAVE_FFMPEG
	encoder = h264_encoder_ffmpeg_impl.create(width, height, format, quality);
	if (encoder) {
		return encoder;
	}
#endif

	return encoder;
}
void h264_encoder_destroy(struct h264_encoder* self)
{
	if (self->current_fb) {
		self->please_destroy = true;
		return;
	}

	vec_destroy(&self->current_packet);
	av_buffer_unref(&self->hw_frames_ctx);
	avcodec_free_context(&self->codec_ctx);
	av_buffer_unref(&self->hw_device_ctx);
	avfilter_graph_free(&self->filter_graph);
	aml_unref(self->work);
	free(self);
}

/* Dispatcher variant simply forwards to the backend: */
void h264_encoder_destroy(struct h264_encoder* self)
{
	self->impl->destroy(self);
}
void h264_encoder_set_packet_handler_fn(struct h264_encoder* self,
		h264_encoder_packet_handler_fn fn)
{
	self->on_packet_ready = fn;
}

void h264_encoder_set_userdata(struct h264_encoder* self, void* userdata)
{
	self->userdata = userdata;
}
void h264_encoder_feed(struct h264_encoder* self, struct nvnc_fb* fb)
{
	self->impl->feed(self, fb);
}
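/* The forwarding feed() above belongs to the dispatcher variant of this
 * file; the inline variant, which queues the buffer and kicks the worker,
 * follows after h264_encoder_request_keyframe() below. */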
void h264_encoder_request_keyframe(struct h264_encoder* self)
{
	self->next_frame_should_be_keyframe = true;
}
void h264_encoder_feed(struct h264_encoder* self, struct nvnc_fb* fb)
{
	assert(fb->type == NVNC_FB_GBM_BO);

	// TODO: Add transform filter
	assert(fb->transform == NVNC_TRANSFORM_NORMAL);

	int rc = fb_queue_enqueue(&self->fb_queue, fb);
	assert(rc == 0); // TODO

	nvnc_fb_hold(fb);

	rc = h264_encoder__schedule_work(self);
	assert(rc == 0); // TODO
}
src/server.c
@ -1,5 +1,5 @@
/*
 * Copyright (c) 2019 - 2022 Andri Yngvason
 * Copyright (c) 2019 - 2024 Andri Yngvason
 *
 * Permission to use, copy, modify, and/or distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above

@ -85,6 +85,7 @@ static void process_fb_update_requests(struct nvnc_client* client);
static void sockaddr_to_string(char* dst, size_t sz,
		const struct sockaddr* addr);
static const char* encoding_to_string(enum rfb_encodings encoding);
static bool client_send_led_state(struct nvnc_client* client);

#if defined(PROJECT_VERSION)
EXPORT const char nvnc_version[] = PROJECT_VERSION;
@ -1025,6 +1026,8 @@ static int on_client_set_encodings(struct nvnc_client* client)
		case RFB_ENCODING_DESKTOPSIZE:
		case RFB_ENCODING_EXTENDEDDESKTOPSIZE:
		case RFB_ENCODING_QEMU_EXT_KEY_EVENT:
		case RFB_ENCODING_QEMU_LED_STATE:
		case RFB_ENCODING_VMWARE_LED_STATE:
#ifdef ENABLE_EXPERIMENTAL
		case RFB_ENCODING_PTS:
		case RFB_ENCODING_NTP:

@ -1117,6 +1120,8 @@ static const char* encoding_to_string(enum rfb_encodings encoding)
	case RFB_ENCODING_DESKTOPSIZE: return "desktop-size";
	case RFB_ENCODING_EXTENDEDDESKTOPSIZE: return "extended-desktop-size";
	case RFB_ENCODING_QEMU_EXT_KEY_EVENT: return "qemu-extended-key-event";
	case RFB_ENCODING_QEMU_LED_STATE: return "qemu-led-state";
	case RFB_ENCODING_VMWARE_LED_STATE: return "vmware-led-state";
	case RFB_ENCODING_PTS: return "pts";
	case RFB_ENCODING_NTP: return "ntp";
	}
@ -1225,6 +1230,11 @@ static void process_fb_update_requests(struct nvnc_client* client)
		return;
	}

	if (client_send_led_state(client)) {
		if (--client->n_pending_requests <= 0)
			return;
	}

	if (!pixman_region_not_empty(&client->damage))
		return;

@ -1796,6 +1806,7 @@ static void on_connection(void* obj)
	client->ref = 1;
	client->server = server;
	client->quality = 10; /* default to lossless */
	client->led_state = -1; /* trigger sending of initial state */

	int fd = accept(server->fd, NULL, 0);
	if (fd < 0) {
@ -1964,7 +1975,7 @@ static int bind_address_unix(const char* name)
}

static int bind_address(const char* name, uint16_t port,
		enum nvnc__socket_type type)
		int fd, enum nvnc__socket_type type)
{
	switch (type) {
	case NVNC__SOCKET_TCP:

@ -1972,6 +1983,9 @@ static int bind_address(const char* name, uint16_t port,
		return bind_address_tcp(name, port);
	case NVNC__SOCKET_UNIX:
		return bind_address_unix(name);
	case NVNC__SOCKET_FROM_FD:
		// nothing to bind
		return fd;
	}

	nvnc_log(NVNC_LOG_PANIC, "Unknown socket address type");

@ -1979,7 +1993,7 @@ static int bind_address(const char* name, uint16_t port,
}

static struct nvnc* open_common(const char* address, uint16_t port,
		enum nvnc__socket_type type)
		int fd, enum nvnc__socket_type type)
{
	nvnc__log_init();

@ -1995,7 +2009,7 @@ static struct nvnc* open_common(const char* address, uint16_t port,

	LIST_INIT(&self->clients);

	self->fd = bind_address(address, port, type);
	self->fd = bind_address(address, port, fd, type);
	if (self->fd < 0)
		goto bind_failure;
@ -2028,14 +2042,14 @@ bind_failure:
EXPORT
struct nvnc* nvnc_open(const char* address, uint16_t port)
{
	return open_common(address, port, NVNC__SOCKET_TCP);
	return open_common(address, port, -1, NVNC__SOCKET_TCP);
}

EXPORT
struct nvnc* nvnc_open_websocket(const char *address, uint16_t port)
{
#ifdef ENABLE_WEBSOCKET
	return open_common(address, port, NVNC__SOCKET_WEBSOCKET);
	return open_common(address, port, -1, NVNC__SOCKET_WEBSOCKET);
#else
	return NULL;
#endif

@ -2044,7 +2058,13 @@ struct nvnc* nvnc_open_websocket(const char *address, uint16_t port)
EXPORT
struct nvnc* nvnc_open_unix(const char* address)
{
	return open_common(address, 0, NVNC__SOCKET_UNIX);
	return open_common(address, 0, -1, NVNC__SOCKET_UNIX);
}

EXPORT
struct nvnc* nvnc_open_from_fd(int fd)
{
	return open_common(NULL, 0, fd, NVNC__SOCKET_FROM_FD);
}

static void unlink_fd_path(int fd)
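/* A minimal usage sketch for the new nvnc_open_from_fd() entry point,
 * assuming the process has been handed an already bound and listening
 * socket, e.g. via systemd socket activation where it arrives as fd 3
 * (SD_LISTEN_FDS_START); the fd number and function name below are
 * illustrative only. */
#if 0
#include <neatvnc.h>

int run_with_inherited_socket(void)
{
	int listen_fd = 3; /* hypothetical: first inherited listening socket */
	struct nvnc* server = nvnc_open_from_fd(listen_fd);
	if (!server)
		return -1;
	/* ... attach displays, set callbacks, run the event loop ... */
	nvnc_close(server);
	return 0;
}
#endif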
@ -2109,7 +2129,7 @@ static void complete_fb_update(struct nvnc_client* client)
	client->current_fb = NULL;
	process_fb_update_requests(client);
	client_unref(client);
	DTRACE_PROBE2(neatvnc, update_fb_done, client, pts);
	DTRACE_PROBE1(neatvnc, update_fb_done, client);
}

static void on_write_frame_done(void* userdata, enum stream_req_status status)
@ -2425,6 +2445,60 @@ bool nvnc_client_supports_cursor(const struct nvnc_client* client)
	return false;
}
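/* client_send_led_state() below delivers keyboard LED state as a
 * framebuffer-update rectangle using a pseudo-encoding: a single byte for
 * clients that advertised the QEMU LED-state encoding, or a 32-bit
 * big-endian word for the VMware variant. It only sends when the state has
 * actually changed and the client has opted in. */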
static bool client_send_led_state(struct nvnc_client* client)
{
	if (client->pending_led_state == client->led_state)
		return false;

	bool have_qemu_led_state =
		client_has_encoding(client, RFB_ENCODING_QEMU_LED_STATE);
	bool have_vmware_led_state =
		client_has_encoding(client, RFB_ENCODING_VMWARE_LED_STATE);

	if (!have_qemu_led_state && !have_vmware_led_state)
		return false;

	nvnc_log(NVNC_LOG_DEBUG, "Keyboard LED state changed: %x -> %x",
			client->led_state, client->pending_led_state);

	struct vec payload;
	vec_init(&payload, 4096);

	struct rfb_server_fb_update_msg head = {
		.type = RFB_SERVER_TO_CLIENT_FRAMEBUFFER_UPDATE,
		.n_rects = htons(1),
	};

	struct rfb_server_fb_rect rect = {
		.encoding = htonl(RFB_ENCODING_QEMU_LED_STATE),
	};

	vec_append(&payload, &head, sizeof(head));
	vec_append(&payload, &rect, sizeof(rect));

	if (have_qemu_led_state) {
		uint8_t data = client->pending_led_state;
		vec_append(&payload, &data, sizeof(data));
	} else if (have_vmware_led_state) {
		uint32_t data = htonl(client->pending_led_state);
		vec_append(&payload, &data, sizeof(data));
	}

	stream_send(client->net_stream, rcbuf_new(payload.data, payload.len),
			NULL, NULL);
	client->led_state = client->pending_led_state;

	return true;
}

EXPORT
void nvnc_client_set_led_state(struct nvnc_client* client,
		enum nvnc_keyboard_led_state state)
{
	client->pending_led_state = state;
	process_fb_update_requests(client);
}
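/* Usage sketch: a server integration would typically forward its keyboard
 * LED state to the client whenever it changes, e.g. from a keymap/modifier
 * callback. The surrounding function and variable names are hypothetical. */
#if 0
static void on_keyboard_leds_changed(struct nvnc_client* client,
		enum nvnc_keyboard_led_state leds)
{
	/* Queues an LED-state update; it is delivered through the pending
	 * framebuffer-update path when the client has requests outstanding. */
	nvnc_client_set_led_state(client, leds);
}
#endif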
EXPORT
void nvnc_set_name(struct nvnc* self, const char* name)
{