Compare commits


29 Commits

Author SHA1 Message Date
Andri Yngvason c700a2c02c Remove logging of sensitive information 2023-09-10 17:47:18 +00:00
Andri Yngvason 89b759c838 server: Allow arbitrary RSA key length 2023-09-10 17:39:12 +00:00
Andri Yngvason 3c3de5f323 API: Add method to set RSA credentials 2023-09-10 17:17:09 +00:00
Andri Yngvason 0f5d8e87de crypto: Add method to import RSA private keys 2023-09-10 17:16:24 +00:00
Andri Yngvason 135127dcc1 Export base64 encoder and decoder 2023-09-09 23:52:57 +00:00
Andri Yngvason c4f48bc47d ws-handshake: Use own base64 and SHA1 implementations 2023-09-09 23:29:48 +00:00
Andri Yngvason 923fa4a53a Add base64 encoder & decoder
I prefer to have these independent of the crypto suite that's being used.
2023-09-09 23:12:47 +00:00
Andri Yngvason 24e4c5900a Implement RSA-AES-256 security type 2023-09-05 20:31:09 +00:00
Andri Yngvason 1a15af0845 fixup! crypto: Add AES256-EAX cipher 2023-09-05 20:28:55 +00:00
Andri Yngvason 6e97000f11 server: Clean up crypto resources on disconnect 2023-09-05 08:48:41 +00:00
Andri Yngvason e7a24822a7 crypto: Make deleting NULL pointers noop 2023-09-05 08:46:11 +00:00
Andri Yngvason 7eb9882877 server: Define rsa-aes server key length constant 2023-09-05 08:41:25 +00:00
Andri Yngvason 103fbe996e crypto: Add sha256 2023-09-04 22:20:31 +00:00
Andri Yngvason b2ad06ae3c crypto: Remove unused code 2023-09-04 22:17:31 +00:00
Andri Yngvason b7614f64a4 crypto: Add AES256-EAX cipher 2023-09-04 22:15:40 +00:00
Andri Yngvason 1d2e6c05a9 server: Use hash_{one,many} 2023-09-04 21:41:05 +00:00
Andri Yngvason 3f949d8e66 crypto: Add helper functions for hashing 2023-09-04 21:40:05 +00:00
Andri Yngvason d418b33dd7 Create dedicated RSA-AES stream
The message format isn't really within the domain of the cipher, so it
doesn't belong to the crypto interface.
2023-09-03 22:30:13 +00:00
Andri Yngvason e65660aea0 fixup! fixup! stream-ws: Inherit stream-tcp 2023-09-03 21:59:35 +00:00
Andri Yngvason 6869eb42e6 fixup! stream-ws: Inherit stream-tcp 2023-09-03 18:47:10 +00:00
Andri Yngvason abf9cc54c0 stream-ws: Clean up exec-and-send resources 2023-09-03 18:37:23 +00:00
Andri Yngvason 83ed3273ce stream-ws: Inherit stream-tcp
This eliminates the need for implementing all stream functions
2023-09-03 17:34:08 +00:00
Andri Yngvason 0199d87368 Add temporary api function to enable auth without tls 2023-08-24 10:14:37 +00:00
Andri Yngvason 727fd785c6 Implement RSA-AES 2023-08-24 10:13:52 +00:00
Andri Yngvason ff5ca722b1 crypto: Integrate message handling into cipher 2023-08-24 10:10:43 +00:00
Andri Yngvason 8cb4910d76 crypto: Add RSA and AES-EAX 2023-08-15 21:41:11 +00:00
Andri Yngvason ef4be68e4f Implement Apple's Diffie-Hellman based security type 30 2023-08-13 20:45:49 +00:00
Andri Yngvason 8ddca0a197 stream: Integrate cipher 2023-08-13 19:55:21 +00:00
Andri Yngvason f6336e02be Add abstract interface for low level crypto 2023-08-13 19:55:21 +00:00
42 changed files with 1072 additions and 9400 deletions
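Note on the "stream-ws: Inherit stream-tcp" commit (83ed3273ce) in the list above: in C, this kind of inheritance is usually done by embedding the base object as the first member of the derived struct, so the derived stream can be passed wherever the base type is expected and can reuse the base implementation instead of re-implementing every stream function. The sketch below only illustrates that pattern; all names in it are hypothetical and are not the symbols used in neatvnc.

/* inherit.c: minimal sketch of "inheritance" by struct embedding.
 * All names are hypothetical; they are not taken from this diff. */
#include <stdio.h>
#include <stddef.h>

struct base_stream {
	int (*send)(struct base_stream*, const void* data, size_t len);
};

static int tcp_send(struct base_stream* self, const void* data, size_t len)
{
	(void)self; (void)data;
	printf("tcp_send: %zu bytes\n", len);
	return (int)len;
}

/* The websocket stream embeds the base as its first member, so it can reuse
 * tcp_send and only override the operations that actually differ. */
struct ws_stream {
	struct base_stream base;
	int handshake_done;
};

int main(void)
{
	struct ws_stream ws = { .base = { .send = tcp_send } };
	return ws.base.send(&ws.base, "hello", 5) == 5 ? 0 : 1;
}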

.gitignore vendored
View File

@ -8,4 +8,3 @@ build
experiments
subprojects
sandbox
.vscode

View File

@ -1,2 +1 @@
github: any1
patreon: andriyngvason

View File

@ -18,9 +18,6 @@ neat.
* gnutls (optional)
* libdrm (optional)
* libturbojpeg (optional)
* nettle (optional)
* hogweed (optional)
* gmp (optional)
* pixman
* zlib

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 2019 - 2024 Andri Yngvason
* Copyright (c) 2019 - 2020 Andri Yngvason
*
* Permission to use, copy, modify, and/or distribute this software for any
* purpose with or without fee is hereby granted, provided that the above
@ -83,6 +83,7 @@ struct nvnc_client {
struct nvnc_common common;
int ref;
struct stream* net_stream;
char hostname[256];
char username[256];
struct nvnc* server;
enum nvnc_client_state state;
@ -102,15 +103,10 @@ struct nvnc_client {
uint32_t known_width;
uint32_t known_height;
struct cut_text cut_text;
bool is_ext_notified;
bool is_qemu_key_ext_notified;
struct encoder* encoder;
struct encoder* zrle_encoder;
struct encoder* tight_encoder;
uint32_t cursor_seq;
int quality;
bool formats_changed;
enum nvnc_keyboard_led_state led_state;
enum nvnc_keyboard_led_state pending_led_state;
#ifdef HAVE_CRYPTO
struct crypto_key* apple_dh_secret;
@ -131,7 +127,6 @@ enum nvnc__socket_type {
NVNC__SOCKET_TCP,
NVNC__SOCKET_UNIX,
NVNC__SOCKET_WEBSOCKET,
NVNC__SOCKET_FROM_FD,
};
struct nvnc {
@ -157,12 +152,10 @@ struct nvnc {
} cursor;
uint32_t cursor_seq;
enum nvnc_auth_flags auth_flags;
nvnc_auth_fn auth_fn;
void* auth_ud;
#ifdef ENABLE_TLS
gnutls_certificate_credentials_t tls_creds;
nvnc_auth_fn auth_fn;
void* auth_ud;
#endif
#ifdef HAVE_CRYPTO

View File

@ -20,10 +20,8 @@
struct pixman_region16;
struct nvnc_fb;
struct XXH3_state_s;
struct damage_refinery {
struct XXH3_state_s* state;
uint32_t* hashes;
uint32_t width;
uint32_t height;

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 2021 - 2024 Andri Yngvason
* Copyright (c) 2021 - 2022 Andri Yngvason
*
* Permission to use, copy, modify, and/or distribute this software for any
* purpose with or without fee is hereby granted, provided that the above
@ -17,28 +17,13 @@
#include <stdint.h>
#include <unistd.h>
#include <stdbool.h>
struct nvnc_fb;
struct h264_encoder;
struct nvnc_fb;
typedef void (*h264_encoder_packet_handler_fn)(const void* payload, size_t size,
uint64_t pts, void* userdata);
struct h264_encoder_impl {
struct h264_encoder* (*create)(uint32_t width, uint32_t height,
uint32_t format, int quality);
void (*destroy)(struct h264_encoder*);
void (*feed)(struct h264_encoder*, struct nvnc_fb*);
};
struct h264_encoder {
struct h264_encoder_impl *impl;
h264_encoder_packet_handler_fn on_packet_ready;
void* userdata;
bool next_frame_should_be_keyframe;
};
struct h264_encoder* h264_encoder_create(uint32_t width, uint32_t height,
uint32_t format, int quality);

View File

@ -0,0 +1,48 @@
/*
* The MIT License (MIT)
*
* Copyright (c) 2014 Joseph Werle
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to deal
* in the Software without restriction, including without limitation the rights
* to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
* copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/
#ifndef MURMURHASH_H
#define MURMURHASH_H 1
#include <stdint.h>
#define MURMURHASH_VERSION "0.0.3"
#ifdef __cplusplus
extern "C" {
#endif
/**
* Returns a murmur hash of `key' based on `seed'
* using the MurmurHash3 algorithm
*/
uint32_t
murmurhash (const char *, uint32_t, uint32_t);
#ifdef __cplusplus
}
#endif
#endif
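For reference, a minimal call to the murmurhash() declared above; the buffer and seed here are made up. The damage-refinery hunk further down uses it the same way, passing tile pixel data and chaining the previous hash back in as the seed.

/* Usage sketch for the murmurhash() declaration above. */
#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>
#include "murmurhash.h"

int main(void)
{
	const char data[] = "example tile data";
	uint32_t seed = 0;
	/* Arguments are (key, length in bytes, seed). */
	uint32_t hash = murmurhash(data, (uint32_t)strlen(data), seed);
	printf("hash: %08" PRIx32 "\n", hash);
	return 0;
}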

View File

@ -22,7 +22,6 @@
#include <stdarg.h>
#include <stdlib.h>
#include <assert.h>
#include <sys/socket.h>
#define NVNC_NO_PTS UINT64_MAX
@ -49,6 +48,9 @@
#define nvnc_trace(...)
#endif
#define NVNC_BASE64_ENCODED_SIZE(x) ((((x) + 2) / 3) * 4 + 1)
#define NVNC_BASE64_DECODED_MAX_SIZE(x) ((((x) + 3) / 4) * 3)
struct nvnc;
struct nvnc_client;
struct nvnc_desktop_layout;
@ -86,12 +88,6 @@ enum nvnc_transform {
NVNC_TRANSFORM_FLIPPED_270 = 7,
};
enum nvnc_keyboard_led_state {
NVNC_KEYBOARD_LED_SCROLL_LOCK = 1 << 0,
NVNC_KEYBOARD_LED_NUM_LOCK = 1 << 1,
NVNC_KEYBOARD_LED_CAPS_LOCK = 1 << 2,
};
enum nvnc_log_level {
NVNC_LOG_PANIC = 0,
NVNC_LOG_ERROR = 1,
@ -101,11 +97,6 @@ enum nvnc_log_level {
NVNC_LOG_TRACE = 5,
};
enum nvnc_auth_flags {
NVNC_AUTH_REQUIRE_AUTH = 1 << 0,
NVNC_AUTH_REQUIRE_ENCRYPTION = 1 << 1,
};
struct nvnc_log_data {
enum nvnc_log_level level;
const char* file;
@ -138,7 +129,6 @@ extern const char nvnc_version[];
struct nvnc* nvnc_open(const char* addr, uint16_t port);
struct nvnc* nvnc_open_unix(const char *addr);
struct nvnc* nvnc_open_websocket(const char* addr, uint16_t port);
struct nvnc* nvnc_open_from_fd(int fd);
void nvnc_close(struct nvnc* self);
void nvnc_add_display(struct nvnc*, struct nvnc_display*);
@ -149,17 +139,13 @@ void* nvnc_get_userdata(const void* self);
struct nvnc* nvnc_client_get_server(const struct nvnc_client* client);
bool nvnc_client_supports_cursor(const struct nvnc_client* client);
int nvnc_client_get_address(const struct nvnc_client* client,
struct sockaddr* restrict addr, socklen_t* restrict addrlen);
const char* nvnc_client_get_hostname(const struct nvnc_client* client);
const char* nvnc_client_get_auth_username(const struct nvnc_client* client);
struct nvnc_client* nvnc_client_first(struct nvnc* self);
struct nvnc_client* nvnc_client_next(struct nvnc_client* client);
void nvnc_client_close(struct nvnc_client* client);
void nvnc_client_set_led_state(struct nvnc_client*,
enum nvnc_keyboard_led_state);
void nvnc_set_name(struct nvnc* self, const char* name);
void nvnc_set_key_fn(struct nvnc* self, nvnc_key_fn);
@ -171,11 +157,17 @@ void nvnc_set_client_cleanup_fn(struct nvnc_client* self, nvnc_client_fn fn);
void nvnc_set_cut_text_fn(struct nvnc*, nvnc_cut_text_fn fn);
void nvnc_set_desktop_layout_fn(struct nvnc* self, nvnc_desktop_layout_fn);
/* TODO: Changes this interface so that we have enable_auth(auth_fn),
* set_tls_creds(key, cert), and has_tls() -> bool
*/
bool nvnc_has_auth(void);
int nvnc_enable_auth(struct nvnc* self, enum nvnc_auth_flags flags,
nvnc_auth_fn, void* userdata);
int nvnc_set_tls_creds(struct nvnc* self, const char* privkey_path,
const char* cert_path);
int nvnc_enable_auth(struct nvnc* self, const char* privkey_path,
const char* cert_path, nvnc_auth_fn, void* userdata);
int nvnc_enable_auth2(struct nvnc* self, nvnc_auth_fn, void* userdata);
int nvnc_set_rsa_creds(struct nvnc* self, const char* private_key_path);
struct nvnc_fb* nvnc_fb_new(uint16_t width, uint16_t height,
@ -247,9 +239,9 @@ void nvnc_set_cursor(struct nvnc*, struct nvnc_fb*, uint16_t width,
uint16_t height, uint16_t hotspot_x, uint16_t hotspot_y,
bool is_damaged);
void nvnc_default_logger(const struct nvnc_log_data* meta, const char* message);
void nvnc_set_log_fn(nvnc_log_fn);
void nvnc_set_log_fn_thread_local(nvnc_log_fn fn);
void nvnc_set_log_level(enum nvnc_log_level);
void nvnc__log(const struct nvnc_log_data*, const char* fmt, ...);
void nvnc_base64_encode(char* dst, const uint8_t* src, size_t src_len);
ssize_t nvnc_base64_decode(uint8_t* dst, const char* src);
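The hunk above shows both generations of the auth API side by side. Below is a minimal sketch of wiring up the no-TLS path added in this commit range (nvnc_enable_auth2 plus nvnc_set_rsa_creds); the nvnc_auth_fn callback signature is not visible in this hunk, so the (username, password, userdata) form used here is an assumption, and the key path is a placeholder.

/* Sketch only: uses nvnc_has_auth(), nvnc_enable_auth2() and
 * nvnc_set_rsa_creds() as declared above. The nvnc_auth_fn signature is
 * assumed, not taken from this hunk. */
#include <neatvnc.h>
#include <stdbool.h>
#include <string.h>

static bool on_auth(const char* username, const char* password, void* ud)
{
	(void)ud;
	return strcmp(username, "user") == 0 &&
		strcmp(password, "secret") == 0;
}

static int setup_auth(struct nvnc* server)
{
	if (!nvnc_has_auth())
		return -1;
	if (nvnc_enable_auth2(server, on_auth, NULL) < 0)
		return -1;
	/* The RSA-AES security types need an RSA private key: */
	return nvnc_set_rsa_creds(server, "/path/to/rsa-key.pem");
}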

View File

@ -22,11 +22,10 @@
#include <stdbool.h>
struct rfb_pixel_format;
struct rfb_set_colour_map_entries_msg;
void pixel_to_cpixel(uint8_t* restrict dst,
void pixel32_to_cpixel(uint8_t* restrict dst,
const struct rfb_pixel_format* dst_fmt,
const uint8_t* restrict src,
const uint32_t* restrict src,
const struct rfb_pixel_format* src_fmt,
size_t bytes_per_cpixel, size_t len);
@ -42,4 +41,3 @@ bool extract_alpha_mask(uint8_t* dst, const void* src, uint32_t format,
const char* drm_format_to_string(uint32_t fmt);
const char* rfb_pixfmt_to_string(const struct rfb_pixel_format* fmt);
void make_rgb332_pal8_map(struct rfb_set_colour_map_entries_msg* msg);

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 2019 - 2024 Andri Yngvason
* Copyright (c) 2019 - 2022 Andri Yngvason
*
* Permission to use, copy, modify, and/or distribute this software for any
* purpose with or without fee is hereby granted, provided that the above
@ -69,11 +69,9 @@ enum rfb_encodings {
RFB_ENCODING_CURSOR = -239,
RFB_ENCODING_DESKTOPSIZE = -223,
RFB_ENCODING_QEMU_EXT_KEY_EVENT = -258,
RFB_ENCODING_QEMU_LED_STATE = -261,
RFB_ENCODING_EXTENDEDDESKTOPSIZE = -308,
RFB_ENCODING_PTS = -1000,
RFB_ENCODING_NTP = -1001,
RFB_ENCODING_VMWARE_LED_STATE = 0x574d5668,
};
#define RFB_ENCODING_JPEG_HIGHQ -23
@ -116,13 +114,6 @@ enum rfb_rsa_aes_cred_subtype {
RFB_RSA_AES_CRED_SUBTYPE_ONLY_PASS = 2,
};
// This is the same for both qemu and vmware extensions
enum rfb_led_state {
RFB_LED_STATE_SCROLL_LOCK = 1 << 0,
RFB_LED_STATE_NUM_LOCK = 1 << 1,
RFB_LED_STATE_CAPS_LOCK = 1 << 2,
};
struct rfb_security_types_msg {
uint8_t n;
uint8_t types[0];
@ -275,15 +266,3 @@ struct rfb_rsa_aes_challenge_msg {
uint16_t length;
uint8_t challenge[0];
} RFB_PACKED;
struct rfb_colour_map_entry {
uint16_t r, g, b;
} RFB_PACKED;
struct rfb_set_colour_map_entries_msg {
uint8_t type;
uint8_t padding;
uint16_t first_colour;
uint16_t n_colours;
struct rfb_colour_map_entry colours[0];
} RFB_PACKED;
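Several of the messages above end in a zero-length array (types[0], challenge[0]), meaning the payload follows the fixed header directly on the wire. As an illustration of how such a variable-length message is parsed, here is a sketch for rfb_rsa_aes_challenge_msg; it is not code from this diff, and it assumes RFB_PACKED is the usual packed attribute and that multi-byte fields are big-endian on the wire.

/* Parsing sketch for the variable-length challenge message shown above.
 * The struct is repeated here (with RFB_PACKED written out) so the snippet
 * stands alone; this is illustration, not neatvnc code. */
#include <arpa/inet.h>
#include <stdint.h>
#include <string.h>
#include <sys/types.h>

struct rfb_rsa_aes_challenge_msg {
	uint16_t length;
	uint8_t challenge[0];
} __attribute__((packed));

ssize_t parse_rsa_aes_challenge(const uint8_t* buf, size_t buf_len,
		uint8_t* out, size_t out_size)
{
	const struct rfb_rsa_aes_challenge_msg* msg = (const void*)buf;
	if (buf_len < sizeof(*msg))
		return -1;
	size_t challenge_len = ntohs(msg->length); /* wire format is big-endian */
	if (buf_len - sizeof(*msg) < challenge_len || challenge_len > out_size)
		return -1;
	memcpy(out, msg->challenge, challenge_len);
	return (ssize_t)challenge_len;
}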

View File

@ -1,19 +1,3 @@
/*
* Copyright (c) 2023 Andri Yngvason
*
* Permission to use, copy, modify, and/or distribute this software for any
* purpose with or without fee is hereby granted, provided that the above
* copyright notice and this permission notice appear in all copies.
*
* THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES WITH
* REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY
* AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT,
* INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
* LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE
* OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
* PERFORMANCE OF THIS SOFTWARE.
*/
#pragma once
#include <stdint.h>

File diff suppressed because it is too large

View File

@ -1,7 +1,7 @@
project(
'neatvnc',
'c',
version: '0.9-dev',
version: '0.6.0',
license: 'ISC',
default_options: [
'c_std=gnu11',
@ -13,6 +13,7 @@ buildtype = get_option('buildtype')
host_system = host_machine.system()
c_args = [
'-DPROJECT_VERSION="@0@"'.format(meson.project_version()),
'-D_GNU_SOURCE',
'-fvisibility=hidden',
'-DAML_UNSTABLE_API=1',
@ -26,20 +27,17 @@ if buildtype != 'debug' and buildtype != 'debugoptimized'
c_args += '-DNDEBUG'
endif
version = '"@0@"'.format(meson.project_version())
git = find_program('git', native: true, required: false)
if git.found()
git_commit = run_command([git, 'rev-parse', '--short', 'HEAD'])
git_describe = run_command([git, 'describe', '--tags', '--long'])
git_branch = run_command([git, 'rev-parse', '--abbrev-ref', 'HEAD'])
if git_commit.returncode() == 0 and git_branch.returncode() == 0
version = '"v@0@-@1@ (@2@)"'.format(
meson.project_version(),
git_commit.stdout().strip(),
if git_describe.returncode() == 0 and git_branch.returncode() == 0
c_args += '-DGIT_VERSION="@0@ (@1@)"'.format(
git_describe.stdout().strip(),
git_branch.stdout().strip(),
)
endif
endif
add_project_arguments('-DPROJECT_VERSION=@0@'.format(version), language: 'c')
libdrm_inc = dependency('libdrm').partial_dependency(compile_args: true)
@ -93,6 +91,7 @@ sources = [
'src/resampler.c',
'src/transform-util.c',
'src/damage-refinery.c',
'src/murmurhash.c',
'src/encoder.c',
'src/cursor.c',
'src/logging.c',
@ -138,24 +137,11 @@ if gbm.found()
config.set('HAVE_GBM', true)
endif
have_ffmpeg = gbm.found() and libdrm.found() and libavcodec.found() and libavfilter.found() and libavutil.found()
have_v4l2 = gbm.found() and libdrm.found() and cc.check_header('linux/videodev2.h')
if have_ffmpeg
sources += [ 'src/h264-encoder-ffmpeg-impl.c' ]
dependencies += [libdrm, libavcodec, libavfilter, libavutil]
config.set('HAVE_FFMPEG', true)
config.set('HAVE_LIBAVUTIL', true)
endif
if have_v4l2
sources += [ 'src/h264-encoder-v4l2m2m-impl.c' ]
config.set('HAVE_V4L2', true)
endif
if have_ffmpeg or have_v4l2
if gbm.found() and libdrm.found() and libavcodec.found() and libavfilter.found() and libavutil.found()
sources += [ 'src/h264-encoder.c', 'src/open-h264.c' ]
dependencies += [libdrm, libavcodec, libavfilter, libavutil]
config.set('ENABLE_OPEN_H264', true)
config.set('HAVE_LIBAVUTIL', true)
endif
if enable_websocket
@ -168,13 +154,6 @@ if enable_websocket
config.set('ENABLE_WEBSOCKET', true)
endif
if get_option('experimental')
if buildtype == 'release'
warning('Experimental features enabled in release build')
endif
config.set('ENABLE_EXPERIMENTAL', true)
endif
configure_file(
output: 'config.h',
configuration: config,

View File

@ -7,4 +7,3 @@ option('nettle', type: 'feature', value: 'auto', description: 'Enable nettle low
option('systemtap', type: 'boolean', value: false, description: 'Enable tracing using sdt')
option('gbm', type: 'feature', value: 'auto', description: 'Enable GBM integration')
option('h264', type: 'feature', value: 'auto', description: 'Enable open h264 encoding')
option('experimental', type: 'boolean', value: false, description: 'Enable experimental features')

View File

@ -14,6 +14,7 @@
*/
#include "base64.h"
#include "neatvnc.h"
#include <unistd.h>
#include <stdint.h>
@ -21,6 +22,8 @@
#include <stdlib.h>
#include <string.h>
#define EXPORT __attribute__((visibility("default")))
static const char base64_enc_lut[] =
"ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789+/";
@ -153,3 +156,15 @@ ssize_t base64_decode(uint8_t* dst, const char* src)
return i * 3 + di;
}
EXPORT
void nvnc_base64_encode(char* dst, const uint8_t* src, size_t src_len)
{
base64_encode(dst, src, src_len);
}
EXPORT
ssize_t nvnc_base64_decode(uint8_t* dst, const char* src)
{
return base64_decode(dst, src);
}
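A round-trip sketch for the exported wrappers above, sized with the NVNC_BASE64_ENCODED_SIZE and NVNC_BASE64_DECODED_MAX_SIZE macros from the neatvnc.h hunk earlier. That the encoder NUL-terminates its output is an assumption here, suggested by the "+ 1" in the encoded-size macro.

/* Application-side sketch; assumes nvnc_base64_encode() NUL-terminates. */
#include <neatvnc.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>
#include <sys/types.h>

int main(void)
{
	const uint8_t payload[] = { 0xde, 0xad, 0xbe, 0xef };

	char encoded[NVNC_BASE64_ENCODED_SIZE(sizeof(payload))];
	nvnc_base64_encode(encoded, payload, sizeof(payload));
	printf("encoded: %s\n", encoded);

	uint8_t decoded[NVNC_BASE64_DECODED_MAX_SIZE(sizeof(encoded) - 1)];
	ssize_t len = nvnc_base64_decode(decoded, encoded);
	if (len != (ssize_t)sizeof(payload))
		return 1;
	return memcmp(decoded, payload, (size_t)len) == 0 ? 0 : 1;
}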

View File

@ -318,7 +318,7 @@ static struct crypto_cipher* crypto_cipher_new_aes128_ecb(
aes128_set_encrypt_key(&self->enc_ctx.aes128_ecb, enc_key);
if (dec_key)
aes128_set_decrypt_key(&self->dec_ctx.aes128_ecb, dec_key);
aes128_set_decrypt_key(&self->enc_ctx.aes128_ecb, dec_key);
self->encrypt = crypto_cipher_aes128_ecb_encrypt;
self->decrypt = crypto_cipher_aes128_ecb_decrypt;
@ -609,8 +609,7 @@ bool crypto_rsa_priv_key_load(struct crypto_rsa_priv_key* priv,
}
char head[128];
strncpy(head, line, sizeof(head));
head[sizeof(head) - 1] = '\0';
strlcpy(head, line, sizeof(head));
char* end = strchr(head, '\n');
if (end)
*end = '\0';
@ -634,9 +633,9 @@ bool crypto_rsa_priv_key_load(struct crypto_rsa_priv_key* priv,
uint8_t* der = malloc(BASE64_DECODED_MAX_SIZE(base64_der.len));
assert(der);
vec_destroy(&base64_der);
ssize_t der_len = base64_decode(der, base64_der.data);
vec_destroy(&base64_der);
if (der_len < 0) {
free(der);
return false;
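One detail in the hunk above: strncpy() does not NUL-terminate the destination when the source is at least as long as the buffer, which is why the strncpy() variant needs the explicit head[sizeof(head) - 1] = '\0', whereas strlcpy() always terminates. A minimal illustration of the strncpy() pattern (names here are illustrative, not from the diff):

/* Illustration only: bounded copy with guaranteed NUL termination. */
#include <string.h>

void copy_truncated(char* dst, size_t dst_size, const char* src)
{
	strncpy(dst, src, dst_size);
	dst[dst_size - 1] = '\0'; /* strncpy may leave dst unterminated */
}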

View File

@ -94,14 +94,12 @@ int cursor_encode(struct vec* dst, struct rfb_pixel_format* pixfmt,
uint8_t* dstdata = dst->data;
dstdata += dst->len;
int32_t src_byte_stride = image->stride * (srcfmt.bits_per_pixel / 8);
if((int32_t)width == image->stride) {
pixel_to_cpixel(dstdata, pixfmt, image->addr, &srcfmt, bpp, size);
pixel32_to_cpixel(dstdata, pixfmt, image->addr, &srcfmt, bpp, size);
} else {
for (uint32_t y = 0; y < height; ++y) {
pixel_to_cpixel(dstdata + y * bpp * width, pixfmt,
(uint8_t*)image->addr + y * src_byte_stride,
pixel32_to_cpixel(dstdata + y * bpp * width, pixfmt,
(uint32_t*)image->addr + y * image->stride,
&srcfmt, bpp, width);
}
}

View File

@ -22,13 +22,8 @@
#include <sys/param.h>
#include "fb.h"
#include "pixels.h"
#include "damage-refinery.h"
#define XXH_STATIC_LINKING_ONLY
#define XXH_IMPLEMENTATION
#define XXH_VECTOR XXH_SCALAR
#include "xxhash.h"
#include "murmurhash.h"
#define UDIV_UP(a, b) (((a) + (b) - 1) / (b))
@ -43,15 +38,9 @@ int damage_refinery_init(struct damage_refinery* self, uint32_t width,
uint32_t twidth = UDIV_UP(width, 32);
uint32_t theight = UDIV_UP(height, 32);
self->state = XXH3_createState();
if (!self->state)
return -1;
self->hashes = calloc(twidth * theight, sizeof(*self->hashes));
if (!self->hashes) {
XXH3_freeState(self->state);
if (!self->hashes)
return -1;
}
return 0;
}
@ -68,31 +57,28 @@ int damage_refinery_resize(struct damage_refinery* self, uint32_t width,
void damage_refinery_destroy(struct damage_refinery* self)
{
XXH3_freeState(self->state);
free(self->hashes);
}
static uint32_t damage_hash_tile(struct damage_refinery* self, uint32_t tx,
uint32_t ty, const struct nvnc_fb* buffer)
{
uint8_t* pixels = buffer->addr;
int bpp = pixel_size_from_fourcc(buffer->fourcc_format);
int byte_stride = buffer->stride * bpp;
uint32_t* pixels = buffer->addr;
int pixel_stride = buffer->stride;
int x_start = tx * 32;
int x_stop = MIN((tx + 1) * 32, self->width);
int y_start = ty * 32;
int y_stop = MIN((ty + 1) * 32, self->height);
int32_t xoff = x_start * bpp;
uint32_t hash = 0;
XXH3_64bits_reset(self->state);
for (int y = y_start; y < y_stop; ++y) {
XXH3_64bits_update(self->state, pixels + xoff + y * byte_stride,
bpp * (x_stop - x_start));
}
// TODO: Support different pixel sizes
for (int y = y_start; y < y_stop; ++y)
hash = murmurhash((void*)&(pixels[x_start + y * pixel_stride]),
4 * (x_stop - x_start), hash);
return XXH3_64bits_digest(self->state);
return hash;
}
static uint32_t* damage_tile_hash_ptr(struct damage_refinery* self,

View File

@ -41,8 +41,6 @@ struct nvnc_fb* nvnc_fb_new(uint16_t width, uint16_t height,
if (!fb)
return NULL;
uint32_t bpp = pixel_size_from_fourcc(fourcc_format);
fb->type = NVNC_FB_SIMPLE;
fb->ref = 1;
fb->width = width;
@ -51,7 +49,7 @@ struct nvnc_fb* nvnc_fb_new(uint16_t width, uint16_t height,
fb->stride = stride;
fb->pts = NVNC_NO_PTS;
size_t size = height * stride * bpp;
size_t size = height * stride * 4; /* Assume 4 byte format for now */
size_t alignment = MAX(4, sizeof(void*));
size_t aligned_size = ALIGN_UP(size, alignment);

View File

@ -1,627 +0,0 @@
/*
* Copyright (c) 2021 - 2024 Andri Yngvason
*
* Permission to use, copy, modify, and/or distribute this software for any
* purpose with or without fee is hereby granted, provided that the above
* copyright notice and this permission notice appear in all copies.
*
* THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES WITH
* REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY
* AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT,
* INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
* LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE
* OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
* PERFORMANCE OF THIS SOFTWARE.
*/
#include "h264-encoder.h"
#include "neatvnc.h"
#include "fb.h"
#include "sys/queue.h"
#include "vec.h"
#include "usdt.h"
#include <stdlib.h>
#include <stdint.h>
#include <stdbool.h>
#include <unistd.h>
#include <assert.h>
#include <gbm.h>
#include <xf86drm.h>
#include <aml.h>
#include <libavcodec/avcodec.h>
#include <libavutil/hwcontext.h>
#include <libavutil/hwcontext_drm.h>
#include <libavutil/pixdesc.h>
#include <libavutil/dict.h>
#include <libavfilter/avfilter.h>
#include <libavfilter/buffersink.h>
#include <libavfilter/buffersrc.h>
#include <libdrm/drm_fourcc.h>
struct h264_encoder;
struct fb_queue_entry {
struct nvnc_fb* fb;
TAILQ_ENTRY(fb_queue_entry) link;
};
TAILQ_HEAD(fb_queue, fb_queue_entry);
struct h264_encoder_ffmpeg {
struct h264_encoder base;
uint32_t width;
uint32_t height;
uint32_t format;
AVRational timebase;
AVRational sample_aspect_ratio;
enum AVPixelFormat av_pixel_format;
/* type: AVHWDeviceContext */
AVBufferRef* hw_device_ctx;
/* type: AVHWFramesContext */
AVBufferRef* hw_frames_ctx;
AVCodecContext* codec_ctx;
AVFilterGraph* filter_graph;
AVFilterContext* filter_in;
AVFilterContext* filter_out;
struct fb_queue fb_queue;
struct aml_work* work;
struct nvnc_fb* current_fb;
struct vec current_packet;
bool current_frame_is_keyframe;
bool please_destroy;
};
struct h264_encoder_impl h264_encoder_ffmpeg_impl;
static enum AVPixelFormat drm_to_av_pixel_format(uint32_t format)
{
switch (format) {
case DRM_FORMAT_XRGB8888:
case DRM_FORMAT_ARGB8888:
return AV_PIX_FMT_BGR0;
case DRM_FORMAT_XBGR8888:
case DRM_FORMAT_ABGR8888:
return AV_PIX_FMT_RGB0;
case DRM_FORMAT_RGBX8888:
case DRM_FORMAT_RGBA8888:
return AV_PIX_FMT_0BGR;
case DRM_FORMAT_BGRX8888:
case DRM_FORMAT_BGRA8888:
return AV_PIX_FMT_0RGB;
}
return AV_PIX_FMT_NONE;
}
static void hw_frame_desc_free(void* opaque, uint8_t* data)
{
struct AVDRMFrameDescriptor* desc = (void*)data;
assert(desc);
for (int i = 0; i < desc->nb_objects; ++i)
close(desc->objects[i].fd);
free(desc);
}
// TODO: Maybe do this once per frame inside nvnc_fb?
static AVFrame* fb_to_avframe(struct nvnc_fb* fb)
{
struct gbm_bo* bo = fb->bo;
int n_planes = gbm_bo_get_plane_count(bo);
AVDRMFrameDescriptor* desc = calloc(1, sizeof(*desc));
desc->nb_objects = n_planes;
desc->nb_layers = 1;
desc->layers[0].format = gbm_bo_get_format(bo);
desc->layers[0].nb_planes = n_planes;
for (int i = 0; i < n_planes; ++i) {
uint32_t stride = gbm_bo_get_stride_for_plane(bo, i);
desc->objects[i].fd = gbm_bo_get_fd_for_plane(bo, i);
desc->objects[i].size = stride * fb->height;
desc->objects[i].format_modifier = gbm_bo_get_modifier(bo);
desc->layers[0].format = gbm_bo_get_format(bo);
desc->layers[0].planes[i].object_index = i;
desc->layers[0].planes[i].offset = gbm_bo_get_offset(bo, i);
desc->layers[0].planes[i].pitch = stride;
}
AVFrame* frame = av_frame_alloc();
if (!frame) {
hw_frame_desc_free(NULL, (void*)desc);
return NULL;
}
frame->opaque = fb;
frame->width = fb->width;
frame->height = fb->height;
frame->format = AV_PIX_FMT_DRM_PRIME;
frame->sample_aspect_ratio = (AVRational){1, 1};
AVBufferRef* desc_ref = av_buffer_create((void*)desc, sizeof(*desc),
hw_frame_desc_free, NULL, 0);
if (!desc_ref) {
hw_frame_desc_free(NULL, (void*)desc);
av_frame_free(&frame);
return NULL;
}
frame->buf[0] = desc_ref;
frame->data[0] = (void*)desc_ref->data;
// TODO: Set colorspace?
return frame;
}
static struct nvnc_fb* fb_queue_dequeue(struct fb_queue* queue)
{
if (TAILQ_EMPTY(queue))
return NULL;
struct fb_queue_entry* entry = TAILQ_FIRST(queue);
TAILQ_REMOVE(queue, entry, link);
struct nvnc_fb* fb = entry->fb;
free(entry);
return fb;
}
static int fb_queue_enqueue(struct fb_queue* queue, struct nvnc_fb* fb)
{
struct fb_queue_entry* entry = calloc(1, sizeof(*entry));
if (!entry)
return -1;
entry->fb = fb;
nvnc_fb_ref(fb);
TAILQ_INSERT_TAIL(queue, entry, link);
return 0;
}
static int h264_encoder__init_buffersrc(struct h264_encoder_ffmpeg* self)
{
int rc;
/* Placeholder values are used to pacify input checking and the real
* values are set below.
*/
rc = avfilter_graph_create_filter(&self->filter_in,
avfilter_get_by_name("buffer"), "in",
"width=1:height=1:pix_fmt=drm_prime:time_base=1/1", NULL,
self->filter_graph);
if (rc != 0)
return -1;
AVBufferSrcParameters *params = av_buffersrc_parameters_alloc();
if (!params)
return -1;
params->format = AV_PIX_FMT_DRM_PRIME;
params->width = self->width;
params->height = self->height;
params->sample_aspect_ratio = self->sample_aspect_ratio;
params->time_base = self->timebase;
params->hw_frames_ctx = self->hw_frames_ctx;
rc = av_buffersrc_parameters_set(self->filter_in, params);
assert(rc == 0);
av_free(params);
return 0;
}
static int h264_encoder__init_filters(struct h264_encoder_ffmpeg* self)
{
int rc;
self->filter_graph = avfilter_graph_alloc();
if (!self->filter_graph)
return -1;
rc = h264_encoder__init_buffersrc(self);
if (rc != 0)
goto failure;
rc = avfilter_graph_create_filter(&self->filter_out,
avfilter_get_by_name("buffersink"), "out", NULL,
NULL, self->filter_graph);
if (rc != 0)
goto failure;
AVFilterInOut* inputs = avfilter_inout_alloc();
if (!inputs)
goto failure;
inputs->name = av_strdup("in");
inputs->filter_ctx = self->filter_in;
inputs->pad_idx = 0;
inputs->next = NULL;
AVFilterInOut* outputs = avfilter_inout_alloc();
if (!outputs) {
avfilter_inout_free(&inputs);
goto failure;
}
outputs->name = av_strdup("out");
outputs->filter_ctx = self->filter_out;
outputs->pad_idx = 0;
outputs->next = NULL;
rc = avfilter_graph_parse(self->filter_graph,
"hwmap=mode=direct:derive_device=vaapi"
",scale_vaapi=format=nv12:mode=fast",
outputs, inputs, NULL);
if (rc != 0)
goto failure;
assert(self->hw_device_ctx);
for (unsigned int i = 0; i < self->filter_graph->nb_filters; ++i) {
self->filter_graph->filters[i]->hw_device_ctx =
av_buffer_ref(self->hw_device_ctx);
}
rc = avfilter_graph_config(self->filter_graph, NULL);
if (rc != 0)
goto failure;
return 0;
failure:
avfilter_graph_free(&self->filter_graph);
return -1;
}
static int h264_encoder__init_codec_context(struct h264_encoder_ffmpeg* self,
const AVCodec* codec, int quality)
{
self->codec_ctx = avcodec_alloc_context3(codec);
if (!self->codec_ctx)
return -1;
struct AVCodecContext* c = self->codec_ctx;
c->width = self->width;
c->height = self->height;
c->time_base = self->timebase;
c->sample_aspect_ratio = self->sample_aspect_ratio;
c->pix_fmt = AV_PIX_FMT_VAAPI;
c->gop_size = INT32_MAX; /* We'll select key frames manually */
c->max_b_frames = 0; /* B-frames are bad for latency */
c->global_quality = quality;
/* open-h264 requires baseline profile, so we use constrained
* baseline: AV_PROFILE_H264_BASELINE.
* But that is not supported by many clients. So we use a "DEFAULT" profile.
*
*/
c->profile = AV_PROFILE_H264_MAIN;
return 0;
}
static int h264_encoder__init_hw_frames_context(struct h264_encoder_ffmpeg* self)
{
self->hw_frames_ctx = av_hwframe_ctx_alloc(self->hw_device_ctx);
if (!self->hw_frames_ctx)
return -1;
AVHWFramesContext* c = (AVHWFramesContext*)self->hw_frames_ctx->data;
c->format = AV_PIX_FMT_DRM_PRIME;
c->sw_format = drm_to_av_pixel_format(self->format);
c->width = self->width;
c->height = self->height;
if (av_hwframe_ctx_init(self->hw_frames_ctx) < 0)
av_buffer_unref(&self->hw_frames_ctx);
return 0;
}
static int h264_encoder__schedule_work(struct h264_encoder_ffmpeg* self)
{
if (self->current_fb)
return 0;
self->current_fb = fb_queue_dequeue(&self->fb_queue);
if (!self->current_fb)
return 0;
DTRACE_PROBE1(neatvnc, h264_encode_frame_begin, self->current_fb->pts);
self->current_frame_is_keyframe = self->base.next_frame_should_be_keyframe;
self->base.next_frame_should_be_keyframe = false;
return aml_start(aml_get_default(), self->work);
}
static int h264_encoder__encode(struct h264_encoder_ffmpeg* self,
AVFrame* frame_in)
{
int rc;
rc = av_buffersrc_add_frame_flags(self->filter_in, frame_in,
AV_BUFFERSRC_FLAG_KEEP_REF);
if (rc != 0)
return -1;
AVFrame* filtered_frame = av_frame_alloc();
if (!filtered_frame)
return -1;
rc = av_buffersink_get_frame(self->filter_out, filtered_frame);
if (rc != 0)
goto get_frame_failure;
rc = avcodec_send_frame(self->codec_ctx, filtered_frame);
if (rc != 0)
goto send_frame_failure;
AVPacket* packet = av_packet_alloc();
assert(packet); // TODO
while (1) {
rc = avcodec_receive_packet(self->codec_ctx, packet);
if (rc != 0)
break;
vec_append(&self->current_packet, packet->data, packet->size);
packet->stream_index = 0;
av_packet_unref(packet);
}
// Frame should always start with a zero:
assert(self->current_packet.len == 0 ||
((char*)self->current_packet.data)[0] == 0);
av_packet_free(&packet);
send_frame_failure:
av_frame_unref(filtered_frame);
get_frame_failure:
av_frame_free(&filtered_frame);
return rc == AVERROR(EAGAIN) ? 0 : rc;
}
static void h264_encoder__do_work(void* handle)
{
struct h264_encoder_ffmpeg* self = aml_get_userdata(handle);
AVFrame* frame = fb_to_avframe(self->current_fb);
assert(frame); // TODO
frame->hw_frames_ctx = av_buffer_ref(self->hw_frames_ctx);
if (self->current_frame_is_keyframe) {
#if LIBAVUTIL_VERSION_INT >= AV_VERSION_INT(58, 7, 100)
frame->flags |= AV_FRAME_FLAG_KEY;
#else
frame->key_frame = 1;
#endif
frame->pict_type = AV_PICTURE_TYPE_I;
} else {
#if LIBAVUTIL_VERSION_INT >= AV_VERSION_INT(58, 7, 100)
frame->flags &= ~AV_FRAME_FLAG_KEY;
#else
frame->key_frame = 0;
#endif
frame->pict_type = AV_PICTURE_TYPE_P;
}
int rc = h264_encoder__encode(self, frame);
if (rc != 0) {
char err[256];
av_strerror(rc, err, sizeof(err));
nvnc_log(NVNC_LOG_ERROR, "Failed to encode packet: %s", err);
goto failure;
}
failure:
av_frame_unref(frame);
av_frame_free(&frame);
}
static void h264_encoder__on_work_done(void* handle)
{
struct h264_encoder_ffmpeg* self = aml_get_userdata(handle);
uint64_t pts = nvnc_fb_get_pts(self->current_fb);
nvnc_fb_release(self->current_fb);
nvnc_fb_unref(self->current_fb);
self->current_fb = NULL;
DTRACE_PROBE1(neatvnc, h264_encode_frame_end, pts);
if (self->please_destroy) {
vec_destroy(&self->current_packet);
h264_encoder_destroy(&self->base);
return;
}
if (self->current_packet.len == 0) {
nvnc_log(NVNC_LOG_WARNING, "Whoops, encoded packet length is 0");
return;
}
void* userdata = self->base.userdata;
// Must make a copy of packet because the callback might destroy the
// encoder object.
struct vec packet;
vec_init(&packet, self->current_packet.len);
vec_append(&packet, self->current_packet.data,
self->current_packet.len);
vec_clear(&self->current_packet);
h264_encoder__schedule_work(self);
self->base.on_packet_ready(packet.data, packet.len, pts, userdata);
vec_destroy(&packet);
}
static int find_render_node(char *node, size_t maxlen) {
bool r = -1;
drmDevice *devices[64];
int n = drmGetDevices2(0, devices, sizeof(devices) / sizeof(devices[0]));
for (int i = 0; i < n; ++i) {
drmDevice *dev = devices[i];
if (!(dev->available_nodes & (1 << DRM_NODE_RENDER)))
continue;
strncpy(node, dev->nodes[DRM_NODE_RENDER], maxlen);
node[maxlen - 1] = '\0';
r = 0;
break;
}
drmFreeDevices(devices, n);
return r;
}
static struct h264_encoder* h264_encoder_ffmpeg_create(uint32_t width,
uint32_t height, uint32_t format, int quality)
{
int rc;
struct h264_encoder_ffmpeg* self = calloc(1, sizeof(*self));
if (!self)
return NULL;
self->base.impl = &h264_encoder_ffmpeg_impl;
if (vec_init(&self->current_packet, 65536) < 0)
goto packet_failure;
self->work = aml_work_new(h264_encoder__do_work,
h264_encoder__on_work_done, self, NULL);
if (!self->work)
goto worker_failure;
char render_node[64];
if (find_render_node(render_node, sizeof(render_node)) < 0)
goto render_node_failure;
rc = av_hwdevice_ctx_create(&self->hw_device_ctx,
AV_HWDEVICE_TYPE_DRM, render_node, NULL, 0);
if (rc != 0)
goto hwdevice_ctx_failure;
self->base.next_frame_should_be_keyframe = true;
TAILQ_INIT(&self->fb_queue);
self->width = width;
self->height = height;
self->format = format;
self->timebase = (AVRational){1, 1000000};
self->sample_aspect_ratio = (AVRational){1, 1};
self->av_pixel_format = drm_to_av_pixel_format(format);
if (self->av_pixel_format == AV_PIX_FMT_NONE)
goto pix_fmt_failure;
const AVCodec* codec = avcodec_find_encoder_by_name("h264_vaapi");
if (!codec)
goto codec_failure;
if (h264_encoder__init_hw_frames_context(self) < 0)
goto hw_frames_context_failure;
if (h264_encoder__init_filters(self) < 0)
goto filter_failure;
if (h264_encoder__init_codec_context(self, codec, quality) < 0)
goto codec_context_failure;
self->codec_ctx->hw_frames_ctx =
av_buffer_ref(self->filter_out->inputs[0]->hw_frames_ctx);
AVDictionary *opts = NULL;
av_dict_set_int(&opts, "async_depth", 1, 0);
rc = avcodec_open2(self->codec_ctx, codec, &opts);
av_dict_free(&opts);
if (rc != 0)
goto avcodec_open_failure;
return &self->base;
avcodec_open_failure:
avcodec_free_context(&self->codec_ctx);
codec_context_failure:
filter_failure:
av_buffer_unref(&self->hw_frames_ctx);
hw_frames_context_failure:
codec_failure:
pix_fmt_failure:
av_buffer_unref(&self->hw_device_ctx);
hwdevice_ctx_failure:
render_node_failure:
aml_unref(self->work);
worker_failure:
vec_destroy(&self->current_packet);
packet_failure:
free(self);
return NULL;
}
static void h264_encoder_ffmpeg_destroy(struct h264_encoder* base)
{
struct h264_encoder_ffmpeg* self = (struct h264_encoder_ffmpeg*)base;
if (self->current_fb) {
self->please_destroy = true;
return;
}
vec_destroy(&self->current_packet);
av_buffer_unref(&self->hw_frames_ctx);
avcodec_free_context(&self->codec_ctx);
av_buffer_unref(&self->hw_device_ctx);
avfilter_graph_free(&self->filter_graph);
aml_unref(self->work);
free(self);
}
static void h264_encoder_ffmpeg_feed(struct h264_encoder* base,
struct nvnc_fb* fb)
{
struct h264_encoder_ffmpeg* self = (struct h264_encoder_ffmpeg*)base;
assert(fb->type == NVNC_FB_GBM_BO);
// TODO: Add transform filter
assert(fb->transform == NVNC_TRANSFORM_NORMAL);
int rc = fb_queue_enqueue(&self->fb_queue, fb);
assert(rc == 0); // TODO
nvnc_fb_hold(fb);
rc = h264_encoder__schedule_work(self);
assert(rc == 0); // TODO
}
struct h264_encoder_impl h264_encoder_ffmpeg_impl = {
.create = h264_encoder_ffmpeg_create,
.destroy = h264_encoder_ffmpeg_destroy,
.feed = h264_encoder_ffmpeg_feed,
};

View File

@ -1,741 +0,0 @@
/*
* Copyright (c) 2024 Andri Yngvason
*
* Permission to use, copy, modify, and/or distribute this software for any
* purpose with or without fee is hereby granted, provided that the above
* copyright notice and this permission notice appear in all copies.
*
* THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES WITH
* REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY
* AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT,
* INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
* LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE
* OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
* PERFORMANCE OF THIS SOFTWARE.
*/
#include "h264-encoder.h"
#include "neatvnc.h"
#include "fb.h"
#include "pixels.h"
#include <assert.h>
#include <string.h>
#include <stdio.h>
#include <inttypes.h>
#include <fcntl.h>
#include <sys/mman.h>
#include <sys/ioctl.h>
#include <linux/videodev2.h>
#include <drm_fourcc.h>
#include <gbm.h>
#include <aml.h>
#include <dirent.h>
#define UDIV_UP(a, b) (((a) + (b) - 1) / (b))
#define ALIGN_UP(a, b) ((b) * UDIV_UP((a), (b)))
#define ARRAY_LENGTH(a) (sizeof(a) / sizeof((a)[0]))
#define N_SRC_BUFS 3
#define N_DST_BUFS 3
struct h264_encoder_v4l2m2m_dst_buf {
struct v4l2_buffer buffer;
struct v4l2_plane plane;
void* payload;
};
struct h264_encoder_v4l2m2m_src_buf {
struct v4l2_buffer buffer;
struct v4l2_plane planes[4];
int fd;
bool is_taken;
struct nvnc_fb* fb;
};
struct h264_encoder_v4l2m2m {
struct h264_encoder base;
uint32_t width;
uint32_t height;
uint32_t format;
int quality; // TODO: Can we affect the quality?
char driver[16];
int fd;
struct aml_handler* handler;
struct h264_encoder_v4l2m2m_src_buf src_bufs[N_SRC_BUFS];
int src_buf_index;
struct h264_encoder_v4l2m2m_dst_buf dst_bufs[N_DST_BUFS];
};
struct h264_encoder_impl h264_encoder_v4l2m2m_impl;
static int v4l2_qbuf(int fd, const struct v4l2_buffer* inbuf)
{
assert(inbuf->length <= 4);
struct v4l2_plane planes[4];
struct v4l2_buffer outbuf;
outbuf = *inbuf;
memcpy(&planes, inbuf->m.planes, inbuf->length * sizeof(planes[0]));
outbuf.m.planes = planes;
return ioctl(fd, VIDIOC_QBUF, &outbuf);
}
static inline int v4l2_dqbuf(int fd, struct v4l2_buffer* buf)
{
return ioctl(fd, VIDIOC_DQBUF, buf);
}
static struct h264_encoder_v4l2m2m_src_buf* take_src_buffer(
struct h264_encoder_v4l2m2m* self)
{
unsigned int count = 0;
int i = self->src_buf_index;
struct h264_encoder_v4l2m2m_src_buf* buffer;
do {
buffer = &self->src_bufs[i++];
i %= ARRAY_LENGTH(self->src_bufs);
} while (++count < ARRAY_LENGTH(self->src_bufs) && buffer->is_taken);
if (buffer->is_taken)
return NULL;
self->src_buf_index = i;
buffer->is_taken = true;
return buffer;
}
static bool any_src_buf_is_taken(struct h264_encoder_v4l2m2m* self)
{
bool result = false;
for (unsigned int i = 0; i < ARRAY_LENGTH(self->src_bufs); ++i)
if (self->src_bufs[i].is_taken)
result = true;
return result;
}
static int u32_cmp(const void* pa, const void* pb)
{
const uint32_t *a = pa;
const uint32_t *b = pb;
return *a < *b ? -1 : *a > *b;
}
static size_t get_supported_formats(struct h264_encoder_v4l2m2m* self,
uint32_t* formats, size_t max_len)
{
size_t i = 0;
for (;; ++i) {
struct v4l2_fmtdesc desc = {
.index = i,
.type = V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE,
};
int rc = ioctl(self->fd, VIDIOC_ENUM_FMT, &desc);
if (rc < 0)
break;
nvnc_trace("Got pixel format: %s", desc.description);
formats[i] = desc.pixelformat;
}
qsort(formats, i, sizeof(*formats), u32_cmp);
return i;
}
static bool have_v4l2_format(const uint32_t* formats, size_t n_formats,
uint32_t format)
{
return bsearch(&format, formats, n_formats, sizeof(format), u32_cmp);
}
static uint32_t v4l2_format_from_drm(const uint32_t* formats,
size_t n_formats, uint32_t drm_format)
{
#define TRY_FORMAT(f) \
if (have_v4l2_format(formats, n_formats, f)) \
return f
switch (drm_format) {
case DRM_FORMAT_RGBX8888:
case DRM_FORMAT_RGBA8888:
TRY_FORMAT(V4L2_PIX_FMT_RGBX32);
TRY_FORMAT(V4L2_PIX_FMT_RGBA32);
break;
case DRM_FORMAT_XRGB8888:
case DRM_FORMAT_ARGB8888:
TRY_FORMAT(V4L2_PIX_FMT_XRGB32);
TRY_FORMAT(V4L2_PIX_FMT_ARGB32);
TRY_FORMAT(V4L2_PIX_FMT_RGB32);
break;
case DRM_FORMAT_BGRX8888:
case DRM_FORMAT_BGRA8888:
TRY_FORMAT(V4L2_PIX_FMT_XBGR32);
TRY_FORMAT(V4L2_PIX_FMT_ABGR32);
TRY_FORMAT(V4L2_PIX_FMT_BGR32);
break;
case DRM_FORMAT_XBGR8888:
case DRM_FORMAT_ABGR8888:
TRY_FORMAT(V4L2_PIX_FMT_BGRX32);
TRY_FORMAT(V4L2_PIX_FMT_BGRA32);
break;
// TODO: More formats
}
return 0;
#undef TRY_FORMAT
}
// This driver mixes up pixel formats...
static uint32_t v4l2_format_from_drm_bcm2835(const uint32_t* formats,
size_t n_formats, uint32_t drm_format)
{
switch (drm_format) {
case DRM_FORMAT_XRGB8888:
case DRM_FORMAT_ARGB8888:
return V4L2_PIX_FMT_RGBA32;
case DRM_FORMAT_BGRX8888:
case DRM_FORMAT_BGRA8888:
// TODO: This could also be ABGR, based on how this driver
// behaves
return V4L2_PIX_FMT_BGR32;
}
return 0;
}
static int set_src_fmt(struct h264_encoder_v4l2m2m* self)
{
int rc;
uint32_t supported_formats[256];
size_t n_formats = get_supported_formats(self, supported_formats,
ARRAY_LENGTH(supported_formats));
uint32_t format;
if (strcmp(self->driver, "bcm2835-codec") == 0)
format = v4l2_format_from_drm_bcm2835(supported_formats,
n_formats, self->format);
else
format = v4l2_format_from_drm(supported_formats, n_formats,
self->format);
if (!format) {
nvnc_log(NVNC_LOG_DEBUG, "Failed to find a proper pixel format");
return -1;
}
struct v4l2_format fmt = {
.type = V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE,
};
rc = ioctl(self->fd, VIDIOC_G_FMT, &fmt);
if (rc < 0) {
return -1;
}
struct v4l2_pix_format_mplane* pix_fmt = &fmt.fmt.pix_mp;
pix_fmt->pixelformat = format;
pix_fmt->width = ALIGN_UP(self->width, 16);
pix_fmt->height = ALIGN_UP(self->height, 16);
rc = ioctl(self->fd, VIDIOC_S_FMT, &fmt);
if (rc < 0) {
return -1;
}
return 0;
}
static int set_dst_fmt(struct h264_encoder_v4l2m2m* self)
{
int rc;
struct v4l2_format fmt = {
.type = V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE,
};
rc = ioctl(self->fd, VIDIOC_G_FMT, &fmt);
if (rc < 0) {
return -1;
}
struct v4l2_pix_format_mplane* pix_fmt = &fmt.fmt.pix_mp;
pix_fmt->pixelformat = V4L2_PIX_FMT_H264;
pix_fmt->width = self->width;
pix_fmt->height = self->height;
rc = ioctl(self->fd, VIDIOC_S_FMT, &fmt);
if (rc < 0) {
return -1;
}
return 0;
}
static int alloc_dst_buffers(struct h264_encoder_v4l2m2m* self)
{
int n_bufs = ARRAY_LENGTH(self->dst_bufs);
int rc;
struct v4l2_requestbuffers req = {
.memory = V4L2_MEMORY_MMAP,
.count = n_bufs,
.type = V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE,
};
rc = ioctl(self->fd, VIDIOC_REQBUFS, &req);
if (rc < 0)
return -1;
for (unsigned int i = 0; i < req.count; ++i) {
struct h264_encoder_v4l2m2m_dst_buf* buffer = &self->dst_bufs[i];
struct v4l2_buffer* buf = &buffer->buffer;
buf->index = i;
buf->type = V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE;
buf->memory = V4L2_MEMORY_MMAP;
buf->length = 1;
buf->m.planes = &buffer->plane;
rc = ioctl(self->fd, VIDIOC_QUERYBUF, buf);
if (rc < 0)
return -1;
buffer->payload = mmap(0, buffer->plane.length,
PROT_READ | PROT_WRITE, MAP_SHARED, self->fd,
buffer->plane.m.mem_offset);
if (buffer->payload == MAP_FAILED) {
nvnc_log(NVNC_LOG_ERROR, "Whoops, mapping failed: %m");
return -1;
}
}
return 0;
}
static void enqueue_dst_buffers(struct h264_encoder_v4l2m2m* self)
{
for (unsigned int i = 0; i < ARRAY_LENGTH(self->dst_bufs); ++i) {
int rc = v4l2_qbuf(self->fd, &self->dst_bufs[i].buffer);
assert(rc >= 0);
}
}
static void process_dst_bufs(struct h264_encoder_v4l2m2m* self)
{
int rc;
struct v4l2_plane plane = { 0 };
struct v4l2_buffer buf = {
.type = V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE,
.memory = V4L2_MEMORY_MMAP,
.length = 1,
.m.planes = &plane,
};
while (true) {
rc = v4l2_dqbuf(self->fd, &buf);
if (rc < 0)
break;
uint64_t pts = buf.timestamp.tv_sec * UINT64_C(1000000) +
buf.timestamp.tv_usec;
struct h264_encoder_v4l2m2m_dst_buf* dstbuf =
&self->dst_bufs[buf.index];
size_t size = buf.m.planes[0].bytesused;
static uint64_t last_pts;
if (last_pts && last_pts > pts) {
nvnc_log(NVNC_LOG_ERROR, "pts - last_pts = %"PRIi64,
(int64_t)pts - (int64_t)last_pts);
}
last_pts = pts;
nvnc_trace("Encoded frame (index %d) at %"PRIu64" µs with size: %zu",
buf.index, pts, size);
self->base.on_packet_ready(dstbuf->payload, size, pts,
self->base.userdata);
v4l2_qbuf(self->fd, &buf);
}
}
static void process_src_bufs(struct h264_encoder_v4l2m2m* self)
{
int rc;
struct v4l2_plane planes[4] = { 0 };
struct v4l2_buffer buf = {
.type = V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE,
.memory = V4L2_MEMORY_DMABUF,
.length = 1,
.m.planes = planes,
};
while (true) {
rc = v4l2_dqbuf(self->fd, &buf);
if (rc < 0)
break;
struct h264_encoder_v4l2m2m_src_buf* srcbuf =
&self->src_bufs[buf.index];
srcbuf->is_taken = false;
// TODO: This assumes that there's only one fd
close(srcbuf->planes[0].m.fd);
nvnc_fb_unmap(srcbuf->fb);
nvnc_fb_release(srcbuf->fb);
nvnc_fb_unref(srcbuf->fb);
srcbuf->fb = NULL;
}
}
static void stream_off(struct h264_encoder_v4l2m2m* self)
{
int type = V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE;
ioctl(self->fd, VIDIOC_STREAMOFF, &type);
type = V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE;
ioctl(self->fd, VIDIOC_STREAMOFF, &type);
}
static void free_dst_buffers(struct h264_encoder_v4l2m2m* self)
{
for (unsigned int i = 0; i < ARRAY_LENGTH(self->dst_bufs); ++i) {
struct h264_encoder_v4l2m2m_dst_buf* buf = &self->dst_bufs[i];
munmap(buf->payload, buf->plane.length);
}
}
static int stream_on(struct h264_encoder_v4l2m2m* self)
{
int type = V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE;
ioctl(self->fd, VIDIOC_STREAMON, &type);
type = V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE;
return ioctl(self->fd, VIDIOC_STREAMON, &type);
}
static int alloc_src_buffers(struct h264_encoder_v4l2m2m* self)
{
int rc;
struct v4l2_requestbuffers req = {
.memory = V4L2_MEMORY_DMABUF,
.count = N_SRC_BUFS,
.type = V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE,
};
rc = ioctl(self->fd, VIDIOC_REQBUFS, &req);
if (rc < 0)
return -1;
for (int i = 0; i < N_SRC_BUFS; ++i) {
struct h264_encoder_v4l2m2m_src_buf* buffer = &self->src_bufs[i];
struct v4l2_buffer* buf = &buffer->buffer;
buf->index = i;
buf->type = V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE;
buf->memory = V4L2_MEMORY_DMABUF;
buf->length = 1;
buf->m.planes = buffer->planes;
rc = ioctl(self->fd, VIDIOC_QUERYBUF, buf);
if (rc < 0)
return -1;
}
return 0;
}
static void force_key_frame(struct h264_encoder_v4l2m2m* self)
{
struct v4l2_control ctrl = { 0 };
ctrl.id = V4L2_CID_MPEG_VIDEO_FORCE_KEY_FRAME;
ctrl.value = 0;
ioctl(self->fd, VIDIOC_S_CTRL, &ctrl);
}
static void encode_buffer(struct h264_encoder_v4l2m2m* self,
struct nvnc_fb* fb)
{
struct h264_encoder_v4l2m2m_src_buf* srcbuf = take_src_buffer(self);
if (!srcbuf) {
nvnc_log(NVNC_LOG_ERROR, "Out of source buffers. Dropping frame...");
return;
}
assert(!srcbuf->fb);
nvnc_fb_ref(fb);
nvnc_fb_hold(fb);
/* For some reason the v4l2m2m h264 encoder in the Raspberry Pi 4 gets
* really glitchy unless the buffer is mapped first.
* This should probably be handled by the driver, but it's not.
*/
nvnc_fb_map(fb);
srcbuf->fb = fb;
struct gbm_bo* bo = nvnc_fb_get_gbm_bo(fb);
int n_planes = gbm_bo_get_plane_count(bo);
int fd = gbm_bo_get_fd(bo);
uint32_t height = ALIGN_UP(gbm_bo_get_height(bo), 16);
for (int i = 0; i < n_planes; ++i) {
uint32_t stride = gbm_bo_get_stride_for_plane(bo, i);
uint32_t offset = gbm_bo_get_offset(bo, i);
uint32_t size = stride * height;
srcbuf->buffer.m.planes[i].m.fd = fd;
srcbuf->buffer.m.planes[i].bytesused = size;
srcbuf->buffer.m.planes[i].length = size;
srcbuf->buffer.m.planes[i].data_offset = offset;
}
srcbuf->buffer.timestamp.tv_sec = fb->pts / UINT64_C(1000000);
srcbuf->buffer.timestamp.tv_usec = fb->pts % UINT64_C(1000000);
if (self->base.next_frame_should_be_keyframe)
force_key_frame(self);
self->base.next_frame_should_be_keyframe = false;
int rc = v4l2_qbuf(self->fd, &srcbuf->buffer);
if (rc < 0) {
nvnc_log(NVNC_LOG_PANIC, "Failed to enqueue buffer: %m");
}
}
static void process_fd_events(void* handle)
{
struct h264_encoder_v4l2m2m* self = aml_get_userdata(handle);
process_dst_bufs(self);
}
static void h264_encoder_v4l2m2m_configure(struct h264_encoder_v4l2m2m* self)
{
struct v4l2_control ctrl = { 0 };
ctrl.id = V4L2_CID_MPEG_VIDEO_H264_PROFILE;
ctrl.value = V4L2_MPEG_VIDEO_H264_PROFILE_CONSTRAINED_BASELINE;
ioctl(self->fd, VIDIOC_S_CTRL, &ctrl);
ctrl.id = V4L2_CID_MPEG_VIDEO_H264_I_PERIOD;
ctrl.value = INT_MAX;
ioctl(self->fd, VIDIOC_S_CTRL, &ctrl);
ctrl.id = V4L2_CID_MPEG_VIDEO_BITRATE_MODE;
ctrl.value = V4L2_MPEG_VIDEO_BITRATE_MODE_CQ;
ioctl(self->fd, VIDIOC_S_CTRL, &ctrl);
ctrl.id = V4L2_CID_MPEG_VIDEO_CONSTANT_QUALITY;
ctrl.value = self->quality;
ioctl(self->fd, VIDIOC_S_CTRL, &ctrl);
}
static bool can_encode_to_h264(int fd)
{
size_t i = 0;
for (;; ++i) {
struct v4l2_fmtdesc desc = {
.index = i,
.type = V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE,
};
int rc = ioctl(fd, VIDIOC_ENUM_FMT, &desc);
if (rc < 0)
break;
if (desc.pixelformat == V4L2_PIX_FMT_H264)
return true;
}
return false;
}
static bool can_handle_frame_size(int fd, uint32_t width, uint32_t height)
{
size_t i = 0;
for (;; ++i) {
struct v4l2_frmsizeenum size = {
.index = i,
.pixel_format = V4L2_PIX_FMT_H264,
};
int rc = ioctl(fd, VIDIOC_ENUM_FRAMESIZES, &size);
if (rc < 0)
break;
switch (size.type) {
case V4L2_FRMSIZE_TYPE_DISCRETE:
if (size.discrete.width == width &&
size.discrete.height == height)
return true;
break;
case V4L2_FRMSIZE_TYPE_CONTINUOUS:
case V4L2_FRMSIZE_TYPE_STEPWISE:
if (size.stepwise.min_width <= width &&
width <= size.stepwise.max_width &&
size.stepwise.min_height <= height &&
height <= size.stepwise.max_height &&
(16 % size.stepwise.step_width) == 0 &&
(16 % size.stepwise.step_height) == 0)
return true;
break;
}
}
return false;
}
static bool is_device_capable(int fd, uint32_t width, uint32_t height)
{
struct v4l2_capability cap = { 0 };
int rc = ioctl(fd, VIDIOC_QUERYCAP, &cap);
if (rc < 0)
return false;
uint32_t required_caps = V4L2_CAP_VIDEO_M2M_MPLANE | V4L2_CAP_STREAMING;
if ((cap.capabilities & required_caps) != required_caps)
return false;
if (!can_encode_to_h264(fd))
return false;
if (!can_handle_frame_size(fd, width, height))
return false;
return true;
}
static int find_capable_device(uint32_t width, uint32_t height)
{
int fd = -1;
DIR *dir = opendir("/dev");
assert(dir);
for (;;) {
struct dirent* entry = readdir(dir);
if (!entry)
break;
if (strncmp(entry->d_name, "video", 5) != 0)
continue;
char path[256];
snprintf(path, sizeof(path), "/dev/%s", entry->d_name);
fd = open(path, O_RDWR | O_CLOEXEC);
if (fd < 0) {
continue;
}
if (is_device_capable(fd, width, height)) {
nvnc_log(NVNC_LOG_DEBUG, "Using v4l2m2m device: %s",
path);
break;
}
close(fd);
fd = -1;
}
closedir(dir);
return fd;
}
static struct h264_encoder* h264_encoder_v4l2m2m_create(uint32_t width,
uint32_t height, uint32_t format, int quality)
{
struct h264_encoder_v4l2m2m* self = calloc(1, sizeof(*self));
if (!self)
return NULL;
self->base.impl = &h264_encoder_v4l2m2m_impl;
self->fd = -1;
self->width = width;
self->height = height;
self->format = format;
self->quality = quality;
self->fd = find_capable_device(width, height);
if (self->fd < 0)
goto failure;
struct v4l2_capability cap = { 0 };
ioctl(self->fd, VIDIOC_QUERYCAP, &cap);
strncpy(self->driver, (const char*)cap.driver, sizeof(self->driver));
if (set_src_fmt(self) < 0)
goto failure;
if (set_dst_fmt(self) < 0)
goto failure;
h264_encoder_v4l2m2m_configure(self);
if (alloc_dst_buffers(self) < 0)
goto failure;
if (alloc_src_buffers(self) < 0)
goto failure;
enqueue_dst_buffers(self);
if (stream_on(self) < 0)
goto failure;
int flags = fcntl(self->fd, F_GETFL);
fcntl(self->fd, F_SETFL, flags | O_NONBLOCK);
self->handler = aml_handler_new(self->fd, process_fd_events, self, NULL);
aml_set_event_mask(self->handler, AML_EVENT_READ);
if (aml_start(aml_get_default(), self->handler) < 0) {
aml_unref(self->handler);
goto failure;
}
return &self->base;
failure:
if (self->fd >= 0)
close(self->fd);
return NULL;
}
static void claim_all_src_bufs(
struct h264_encoder_v4l2m2m* self)
{
for (;;) {
process_src_bufs(self);
if (!any_src_buf_is_taken(self))
break;
usleep(10000);
}
}
static void h264_encoder_v4l2m2m_destroy(struct h264_encoder* base)
{
struct h264_encoder_v4l2m2m* self = (struct h264_encoder_v4l2m2m*)base;
claim_all_src_bufs(self);
aml_stop(aml_get_default(), self->handler);
aml_unref(self->handler);
stream_off(self);
free_dst_buffers(self);
if (self->fd >= 0)
close(self->fd);
free(self);
}
static void h264_encoder_v4l2m2m_feed(struct h264_encoder* base,
struct nvnc_fb* fb)
{
struct h264_encoder_v4l2m2m* self = (struct h264_encoder_v4l2m2m*)base;
process_src_bufs(self);
encode_buffer(self, fb);
}
struct h264_encoder_impl h264_encoder_v4l2m2m_impl = {
.create = h264_encoder_v4l2m2m_create,
.destroy = h264_encoder_v4l2m2m_destroy,
.feed = h264_encoder_v4l2m2m_feed,
};

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 2024 Andri Yngvason
* Copyright (c) 2021 - 2022 Andri Yngvason
*
* Permission to use, copy, modify, and/or distribute this software for any
* purpose with or without fee is hereby granted, provided that the above
@ -15,60 +15,606 @@
*/
#include "h264-encoder.h"
#include "config.h"
#include "neatvnc.h"
#include "fb.h"
#include "sys/queue.h"
#include "vec.h"
#include "usdt.h"
#ifdef HAVE_FFMPEG
extern struct h264_encoder_impl h264_encoder_ffmpeg_impl;
#endif
#include <stdlib.h>
#include <stdint.h>
#include <stdbool.h>
#include <unistd.h>
#include <assert.h>
#include <gbm.h>
#include <xf86drm.h>
#include <aml.h>
#ifdef HAVE_V4L2
extern struct h264_encoder_impl h264_encoder_v4l2m2m_impl;
#endif
#include <libavcodec/avcodec.h>
#include <libavutil/hwcontext.h>
#include <libavutil/hwcontext_drm.h>
#include <libavutil/pixdesc.h>
#include <libavutil/dict.h>
#include <libavfilter/avfilter.h>
#include <libavfilter/buffersink.h>
#include <libavfilter/buffersrc.h>
#include <libdrm/drm_fourcc.h>
struct h264_encoder;
struct fb_queue_entry {
struct nvnc_fb* fb;
TAILQ_ENTRY(fb_queue_entry) link;
};
TAILQ_HEAD(fb_queue, fb_queue_entry);
struct h264_encoder {
h264_encoder_packet_handler_fn on_packet_ready;
void* userdata;
uint32_t width;
uint32_t height;
uint32_t format;
AVRational timebase;
AVRational sample_aspect_ratio;
enum AVPixelFormat av_pixel_format;
/* type: AVHWDeviceContext */
AVBufferRef* hw_device_ctx;
/* type: AVHWFramesContext */
AVBufferRef* hw_frames_ctx;
AVCodecContext* codec_ctx;
AVFilterGraph* filter_graph;
AVFilterContext* filter_in;
AVFilterContext* filter_out;
bool next_frame_should_be_keyframe;
struct fb_queue fb_queue;
struct aml_work* work;
struct nvnc_fb* current_fb;
struct vec current_packet;
bool current_frame_is_keyframe;
bool please_destroy;
};
static enum AVPixelFormat drm_to_av_pixel_format(uint32_t format)
{
switch (format) {
case DRM_FORMAT_XRGB8888:
case DRM_FORMAT_ARGB8888:
return AV_PIX_FMT_BGR0;
case DRM_FORMAT_XBGR8888:
case DRM_FORMAT_ABGR8888:
return AV_PIX_FMT_RGB0;
case DRM_FORMAT_RGBX8888:
case DRM_FORMAT_RGBA8888:
return AV_PIX_FMT_0BGR;
case DRM_FORMAT_BGRX8888:
case DRM_FORMAT_BGRA8888:
return AV_PIX_FMT_0RGB;
}
return AV_PIX_FMT_NONE;
}
static void hw_frame_desc_free(void* opaque, uint8_t* data)
{
struct AVDRMFrameDescriptor* desc = (void*)data;
assert(desc);
for (int i = 0; i < desc->nb_objects; ++i)
close(desc->objects[i].fd);
free(desc);
}
// TODO: Maybe do this once per frame inside nvnc_fb?
static AVFrame* fb_to_avframe(struct nvnc_fb* fb)
{
struct gbm_bo* bo = fb->bo;
int n_planes = gbm_bo_get_plane_count(bo);
AVDRMFrameDescriptor* desc = calloc(1, sizeof(*desc));
desc->nb_objects = n_planes;
desc->nb_layers = 1;
desc->layers[0].format = gbm_bo_get_format(bo);
desc->layers[0].nb_planes = n_planes;
for (int i = 0; i < n_planes; ++i) {
uint32_t stride = gbm_bo_get_stride_for_plane(bo, i);
desc->objects[i].fd = gbm_bo_get_fd_for_plane(bo, i);
desc->objects[i].size = stride * fb->height;
desc->objects[i].format_modifier = gbm_bo_get_modifier(bo);
desc->layers[0].format = gbm_bo_get_format(bo);
desc->layers[0].planes[i].object_index = i;
desc->layers[0].planes[i].offset = gbm_bo_get_offset(bo, i);
desc->layers[0].planes[i].pitch = stride;
}
AVFrame* frame = av_frame_alloc();
if (!frame) {
hw_frame_desc_free(NULL, (void*)desc);
return NULL;
}
frame->opaque = fb;
frame->width = fb->width;
frame->height = fb->height;
frame->format = AV_PIX_FMT_DRM_PRIME;
frame->sample_aspect_ratio = (AVRational){1, 1};
AVBufferRef* desc_ref = av_buffer_create((void*)desc, sizeof(*desc),
hw_frame_desc_free, NULL, 0);
if (!desc_ref) {
hw_frame_desc_free(NULL, (void*)desc);
av_frame_free(&frame);
return NULL;
}
frame->buf[0] = desc_ref;
frame->data[0] = (void*)desc_ref->data;
// TODO: Set colorspace?
return frame;
}
static struct nvnc_fb* fb_queue_dequeue(struct fb_queue* queue)
{
if (TAILQ_EMPTY(queue))
return NULL;
struct fb_queue_entry* entry = TAILQ_FIRST(queue);
TAILQ_REMOVE(queue, entry, link);
struct nvnc_fb* fb = entry->fb;
free(entry);
return fb;
}
static int fb_queue_enqueue(struct fb_queue* queue, struct nvnc_fb* fb)
{
struct fb_queue_entry* entry = calloc(1, sizeof(*entry));
if (!entry)
return -1;
entry->fb = fb;
nvnc_fb_ref(fb);
TAILQ_INSERT_TAIL(queue, entry, link);
return 0;
}
static int h264_encoder__init_buffersrc(struct h264_encoder* self)
{
int rc;
/* Placeholder values are used to pacify input checking and the real
* values are set below.
*/
rc = avfilter_graph_create_filter(&self->filter_in,
avfilter_get_by_name("buffer"), "in",
"width=1:height=1:pix_fmt=drm_prime:time_base=1/1", NULL,
self->filter_graph);
if (rc != 0)
return -1;
AVBufferSrcParameters *params = av_buffersrc_parameters_alloc();
if (!params)
return -1;
params->format = AV_PIX_FMT_DRM_PRIME;
params->width = self->width;
params->height = self->height;
params->sample_aspect_ratio = self->sample_aspect_ratio;
params->time_base = self->timebase;
params->hw_frames_ctx = self->hw_frames_ctx;
rc = av_buffersrc_parameters_set(self->filter_in, params);
assert(rc == 0);
av_free(params);
return 0;
}
static int h264_encoder__init_filters(struct h264_encoder* self)
{
int rc;
self->filter_graph = avfilter_graph_alloc();
if (!self->filter_graph)
return -1;
rc = h264_encoder__init_buffersrc(self);
if (rc != 0)
goto failure;
rc = avfilter_graph_create_filter(&self->filter_out,
avfilter_get_by_name("buffersink"), "out", NULL,
NULL, self->filter_graph);
if (rc != 0)
goto failure;
AVFilterInOut* inputs = avfilter_inout_alloc();
if (!inputs)
goto failure;
inputs->name = av_strdup("in");
inputs->filter_ctx = self->filter_in;
inputs->pad_idx = 0;
inputs->next = NULL;
AVFilterInOut* outputs = avfilter_inout_alloc();
if (!outputs) {
avfilter_inout_free(&inputs);
goto failure;
}
outputs->name = av_strdup("out");
outputs->filter_ctx = self->filter_out;
outputs->pad_idx = 0;
outputs->next = NULL;
rc = avfilter_graph_parse(self->filter_graph,
"hwmap=mode=direct:derive_device=vaapi"
",scale_vaapi=format=nv12:mode=fast",
outputs, inputs, NULL);
if (rc != 0)
goto failure;
assert(self->hw_device_ctx);
for (unsigned int i = 0; i < self->filter_graph->nb_filters; ++i) {
self->filter_graph->filters[i]->hw_device_ctx =
av_buffer_ref(self->hw_device_ctx);
}
rc = avfilter_graph_config(self->filter_graph, NULL);
if (rc != 0)
goto failure;
return 0;
failure:
avfilter_graph_free(&self->filter_graph);
return -1;
}
static int h264_encoder__init_codec_context(struct h264_encoder* self,
const AVCodec* codec, int quality)
{
self->codec_ctx = avcodec_alloc_context3(codec);
if (!self->codec_ctx)
return -1;
struct AVCodecContext* c = self->codec_ctx;
c->width = self->width;
c->height = self->height;
c->time_base = self->timebase;
c->sample_aspect_ratio = self->sample_aspect_ratio;
c->pix_fmt = AV_PIX_FMT_VAAPI;
c->gop_size = INT32_MAX; /* We'll select key frames manually */
c->max_b_frames = 0; /* B-frames are bad for latency */
c->global_quality = quality;
/* open-h264 requires baseline profile, so we use constrained
* baseline.
*/
c->profile = 578;
return 0;
}
static int h264_encoder__init_hw_frames_context(struct h264_encoder* self)
{
self->hw_frames_ctx = av_hwframe_ctx_alloc(self->hw_device_ctx);
if (!self->hw_frames_ctx)
return -1;
AVHWFramesContext* c = (AVHWFramesContext*)self->hw_frames_ctx->data;
c->format = AV_PIX_FMT_DRM_PRIME;
c->sw_format = drm_to_av_pixel_format(self->format);
c->width = self->width;
c->height = self->height;
if (av_hwframe_ctx_init(self->hw_frames_ctx) < 0)
av_buffer_unref(&self->hw_frames_ctx);
return 0;
}
static int h264_encoder__schedule_work(struct h264_encoder* self)
{
if (self->current_fb)
return 0;
self->current_fb = fb_queue_dequeue(&self->fb_queue);
if (!self->current_fb)
return 0;
DTRACE_PROBE1(neatvnc, h264_encode_frame_begin, self->current_fb->pts);
self->current_frame_is_keyframe = self->next_frame_should_be_keyframe;
self->next_frame_should_be_keyframe = false;
return aml_start(aml_get_default(), self->work);
}
static int h264_encoder__encode(struct h264_encoder* self, AVFrame* frame_in)
{
int rc;
rc = av_buffersrc_add_frame_flags(self->filter_in, frame_in,
AV_BUFFERSRC_FLAG_KEEP_REF);
if (rc != 0)
return -1;
AVFrame* filtered_frame = av_frame_alloc();
if (!filtered_frame)
return -1;
rc = av_buffersink_get_frame(self->filter_out, filtered_frame);
if (rc != 0)
goto get_frame_failure;
rc = avcodec_send_frame(self->codec_ctx, filtered_frame);
if (rc != 0)
goto send_frame_failure;
AVPacket* packet = av_packet_alloc();
assert(packet); // TODO
while (1) {
rc = avcodec_receive_packet(self->codec_ctx, packet);
if (rc != 0)
break;
vec_append(&self->current_packet, packet->data, packet->size);
packet->stream_index = 0;
av_packet_unref(packet);
}
// Frame should always start with a zero:
assert(self->current_packet.len == 0 ||
((char*)self->current_packet.data)[0] == 0);
av_packet_free(&packet);
send_frame_failure:
av_frame_unref(filtered_frame);
get_frame_failure:
av_frame_free(&filtered_frame);
return rc == AVERROR(EAGAIN) ? 0 : rc;
}
static void h264_encoder__do_work(void* handle)
{
struct h264_encoder* self = aml_get_userdata(handle);
AVFrame* frame = fb_to_avframe(self->current_fb);
assert(frame); // TODO
frame->hw_frames_ctx = av_buffer_ref(self->hw_frames_ctx);
if (self->current_frame_is_keyframe) {
frame->key_frame = 1;
frame->pict_type = AV_PICTURE_TYPE_I;
} else {
frame->key_frame = 0;
frame->pict_type = AV_PICTURE_TYPE_P;
}
int rc = h264_encoder__encode(self, frame);
if (rc != 0) {
char err[256];
av_strerror(rc, err, sizeof(err));
nvnc_log(NVNC_LOG_ERROR, "Failed to encode packet: %s", err);
goto failure;
}
failure:
av_frame_unref(frame);
av_frame_free(&frame);
}
static void h264_encoder__on_work_done(void* handle)
{
struct h264_encoder* self = aml_get_userdata(handle);
uint64_t pts = nvnc_fb_get_pts(self->current_fb);
nvnc_fb_release(self->current_fb);
nvnc_fb_unref(self->current_fb);
self->current_fb = NULL;
DTRACE_PROBE1(neatvnc, h264_encode_frame_end, pts);
if (self->please_destroy) {
vec_destroy(&self->current_packet);
h264_encoder_destroy(self);
return;
}
if (self->current_packet.len == 0) {
nvnc_log(NVNC_LOG_WARNING, "Whoops, encoded packet length is 0");
return;
}
void* userdata = self->userdata;
// Must make a copy of the packet because the callback might destroy the
// encoder object.
struct vec packet;
vec_init(&packet, self->current_packet.len);
vec_append(&packet, self->current_packet.data,
self->current_packet.len);
vec_clear(&self->current_packet);
h264_encoder__schedule_work(self);
self->on_packet_ready(packet.data, packet.len, pts, userdata);
vec_destroy(&packet);
}
static int find_render_node(char *node, size_t maxlen) {
int r = -1;
drmDevice *devices[64];
int n = drmGetDevices2(0, devices, sizeof(devices) / sizeof(devices[0]));
for (int i = 0; i < n; ++i) {
drmDevice *dev = devices[i];
if (!(dev->available_nodes & (1 << DRM_NODE_RENDER)))
continue;
strncpy(node, dev->nodes[DRM_NODE_RENDER], maxlen);
node[maxlen - 1] = '\0';
r = 0;
break;
}
drmFreeDevices(devices, n);
return r;
}
struct h264_encoder* h264_encoder_create(uint32_t width, uint32_t height,
uint32_t format, int quality)
{
struct h264_encoder* encoder = NULL;
int rc;
#ifdef HAVE_V4L2
encoder = h264_encoder_v4l2m2m_impl.create(width, height, format, quality);
if (encoder) {
return encoder;
}
#endif
struct h264_encoder* self = calloc(1, sizeof(*self));
if (!self)
return NULL;
#ifdef HAVE_FFMPEG
encoder = h264_encoder_ffmpeg_impl.create(width, height, format, quality);
if (encoder) {
return encoder;
}
#endif
if (vec_init(&self->current_packet, 65536) < 0)
goto packet_failure;
return encoder;
self->work = aml_work_new(h264_encoder__do_work,
h264_encoder__on_work_done, self, NULL);
if (!self->work)
goto worker_failure;
char render_node[64];
if (find_render_node(render_node, sizeof(render_node)) < 0)
goto render_node_failure;
rc = av_hwdevice_ctx_create(&self->hw_device_ctx,
AV_HWDEVICE_TYPE_DRM, render_node, NULL, 0);
if (rc != 0)
goto hwdevice_ctx_failure;
self->next_frame_should_be_keyframe = true;
TAILQ_INIT(&self->fb_queue);
self->width = width;
self->height = height;
self->format = format;
self->timebase = (AVRational){1, 1000000};
self->sample_aspect_ratio = (AVRational){1, 1};
self->av_pixel_format = drm_to_av_pixel_format(format);
if (self->av_pixel_format == AV_PIX_FMT_NONE)
goto pix_fmt_failure;
const AVCodec* codec = avcodec_find_encoder_by_name("h264_vaapi");
if (!codec)
goto codec_failure;
if (h264_encoder__init_hw_frames_context(self) < 0)
goto hw_frames_context_failure;
if (h264_encoder__init_filters(self) < 0)
goto filter_failure;
if (h264_encoder__init_codec_context(self, codec, quality) < 0)
goto codec_context_failure;
self->codec_ctx->hw_frames_ctx =
av_buffer_ref(self->filter_out->inputs[0]->hw_frames_ctx);
AVDictionary *opts = NULL;
av_dict_set_int(&opts, "async_depth", 1, 0);
rc = avcodec_open2(self->codec_ctx, codec, &opts);
av_dict_free(&opts);
if (rc != 0)
goto avcodec_open_failure;
return self;
avcodec_open_failure:
avcodec_free_context(&self->codec_ctx);
codec_context_failure:
filter_failure:
av_buffer_unref(&self->hw_frames_ctx);
hw_frames_context_failure:
codec_failure:
pix_fmt_failure:
av_buffer_unref(&self->hw_device_ctx);
hwdevice_ctx_failure:
render_node_failure:
aml_unref(self->work);
worker_failure:
vec_destroy(&self->current_packet);
packet_failure:
free(self);
return NULL;
}
void h264_encoder_destroy(struct h264_encoder* self)
{
self->impl->destroy(self);
if (self->current_fb) {
self->please_destroy = true;
return;
}
vec_destroy(&self->current_packet);
av_buffer_unref(&self->hw_frames_ctx);
avcodec_free_context(&self->codec_ctx);
av_buffer_unref(&self->hw_device_ctx);
avfilter_graph_free(&self->filter_graph);
aml_unref(self->work);
free(self);
}
void h264_encoder_set_packet_handler_fn(struct h264_encoder* self,
h264_encoder_packet_handler_fn fn)
h264_encoder_packet_handler_fn value)
{
self->on_packet_ready = fn;
self->on_packet_ready = value;
}
void h264_encoder_set_userdata(struct h264_encoder* self, void* userdata)
void h264_encoder_set_userdata(struct h264_encoder* self, void* value)
{
self->userdata = userdata;
}
void h264_encoder_feed(struct h264_encoder* self, struct nvnc_fb* fb)
{
self->impl->feed(self, fb);
self->userdata = value;
}
void h264_encoder_request_keyframe(struct h264_encoder* self)
{
self->next_frame_should_be_keyframe = true;
}
void h264_encoder_feed(struct h264_encoder* self, struct nvnc_fb* fb)
{
assert(fb->type == NVNC_FB_GBM_BO);
// TODO: Add transform filter
assert(fb->transform == NVNC_TRANSFORM_NORMAL);
int rc = fb_queue_enqueue(&self->fb_queue, fb);
assert(rc == 0); // TODO
nvnc_fb_hold(fb);
rc = h264_encoder__schedule_work(self);
assert(rc == 0); // TODO
}
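A minimal usage sketch of the encoder interface above, assuming an aml event loop is already running, that fb is an NVNC_FB_GBM_BO framebuffer, and with the packet-handler signature inferred from the call site in h264_encoder__on_work_done:

static void on_h264_packet(const void* data, size_t size, uint64_t pts,
		void* userdata)
{
	// Forward the encoded frame to the client here.
	(void)data; (void)size; (void)pts; (void)userdata;
}

static void encode_one_frame(struct nvnc_fb* fb)
{
	struct h264_encoder* enc = h264_encoder_create(fb->width, fb->height,
			DRM_FORMAT_XRGB8888, 6 /* quality, arbitrary here */);
	if (!enc)
		return;

	h264_encoder_set_packet_handler_fn(enc, on_h264_packet);
	h264_encoder_set_userdata(enc, NULL);

	h264_encoder_feed(enc, fb); // on_h264_packet fires from the event loop

	h264_encoder_destroy(enc); // deferred internally if a frame is in flight
}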

View File

@ -25,7 +25,6 @@
#include <stdarg.h>
#include <string.h>
#include <ctype.h>
#include <threads.h>
#ifdef HAVE_LIBAVUTIL
#include <libavutil/avutil.h>
@ -33,8 +32,10 @@
#define EXPORT __attribute__((visibility("default")))
static nvnc_log_fn log_fn = nvnc_default_logger;
static thread_local nvnc_log_fn thread_local_log_fn = NULL;
static void default_logger(const struct nvnc_log_data* meta,
const char* message);
static nvnc_log_fn log_fn = default_logger;
#ifndef NDEBUG
static enum nvnc_log_level log_level = NVNC_LOG_DEBUG;
@ -44,11 +45,6 @@ static enum nvnc_log_level log_level = NVNC_LOG_WARNING;
static bool is_initialised = false;
static nvnc_log_fn get_log_fn(void)
{
return thread_local_log_fn ? thread_local_log_fn : log_fn;
}
static char* trim_left(char* str)
{
while (isspace(*str))
@ -104,15 +100,14 @@ static void nvnc__vlog(const struct nvnc_log_data* meta, const char* fmt,
if (meta->level <= log_level) {
vsnprintf(message, sizeof(message), fmt, args);
get_log_fn()(meta, trim(message));
log_fn(meta, trim(message));
}
if (meta->level == NVNC_LOG_PANIC)
abort();
}
EXPORT
void nvnc_default_logger(const struct nvnc_log_data* meta,
static void default_logger(const struct nvnc_log_data* meta,
const char* message)
{
const char* level = log_level_to_string(meta->level);
@ -183,13 +178,7 @@ void nvnc_set_log_level(enum nvnc_log_level level)
EXPORT
void nvnc_set_log_fn(nvnc_log_fn fn)
{
log_fn = fn ? fn : nvnc_default_logger;
}
EXPORT
void nvnc_set_log_fn_thread_local(nvnc_log_fn fn)
{
thread_local_log_fn = fn;
log_fn = fn;
}
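A small sketch of how the thread-local hook above could be used; the nvnc_log_data fields are assumed from their usage elsewhere in this file:

static void worker_logger(const struct nvnc_log_data* meta,
		const char* message)
{
	fprintf(stderr, "[worker] level=%d: %s\n", meta->level, message);
}

// In a worker thread, before doing anything that may log:
nvnc_set_log_fn_thread_local(worker_logger);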
EXPORT

92
src/murmurhash.c 100644
View File

@ -0,0 +1,92 @@
/*
* The MIT License (MIT)
*
* Copyright (c) 2014 Joseph Werle
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to deal
* in the Software without restriction, including without limitation the rights
* to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
* copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/
#include <stdlib.h>
#include <stdio.h>
#include <stdint.h>
#include "murmurhash.h"
uint32_t
murmurhash (const char *key, uint32_t len, uint32_t seed) {
uint32_t c1 = 0xcc9e2d51;
uint32_t c2 = 0x1b873593;
uint32_t r1 = 15;
uint32_t r2 = 13;
uint32_t m = 5;
uint32_t n = 0xe6546b64;
uint32_t h = 0;
uint32_t k = 0;
uint8_t *d = (uint8_t *) key; // 32 bit extract from `key'
const uint32_t *chunks = NULL;
const uint8_t *tail = NULL; // tail - last 8 bytes
int i = 0;
int l = len / 4; // chunk length
h = seed;
chunks = (const uint32_t *) (d + l * 4); // body
tail = (const uint8_t *) (d + l * 4); // last 8 byte chunk of `key'
// for each 4 byte chunk of `key'
for (i = -l; i != 0; ++i) {
// next 4 byte chunk of `key'
k = chunks[i];
// encode next 4 byte chunk of `key'
k *= c1;
k = (k << r1) | (k >> (32 - r1));
k *= c2;
// append to hash
h ^= k;
h = (h << r2) | (h >> (32 - r2));
h = h * m + n;
}
k = 0;
// remainder
switch (len & 3) { // `len % 4'
case 3: k ^= (tail[2] << 16);
// fallthrough
case 2: k ^= (tail[1] << 8);
// fallthrough
case 1:
k ^= tail[0];
k *= c1;
k = (k << r1) | (k >> (32 - r1));
k *= c2;
h ^= k;
}
h ^= len;
h ^= (h >> 16);
h *= 0x85ebca6b;
h ^= (h >> 13);
h *= 0xc2b2ae35;
h ^= (h >> 16);
return h;
}
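For reference, a minimal call of the function above:

#include "murmurhash.h"

uint32_t example_hash(void)
{
	const char key[] = "neatvnc";
	// seed 0; length excludes the terminating NUL
	return murmurhash(key, sizeof(key) - 1, 0);
}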

View File

@ -19,20 +19,20 @@
#include <stdlib.h>
#include <assert.h>
#include <libdrm/drm_fourcc.h>
#include <math.h>
#define POPCOUNT(x) __builtin_popcount(x)
#define UDIV_UP(a, b) (((a) + (b) - 1) / (b))
#define XSTR(s) STR(s)
#define STR(s) #s
static void pixel32_to_cpixel(uint8_t* restrict dst,
void pixel32_to_cpixel(uint8_t* restrict dst,
const struct rfb_pixel_format* dst_fmt,
const uint32_t* restrict src,
const struct rfb_pixel_format* src_fmt,
size_t bytes_per_cpixel, size_t len)
{
assert(src_fmt->true_colour_flag);
assert(src_fmt->bits_per_pixel == 32);
assert(src_fmt->depth <= 32);
assert(dst_fmt->true_colour_flag);
assert(dst_fmt->bits_per_pixel <= 32);
@ -152,148 +152,6 @@ static void pixel32_to_cpixel(uint8_t* restrict dst,
#undef CONVERT_PIXELS
}
void pixel_to_cpixel(uint8_t* restrict dst,
const struct rfb_pixel_format* dst_fmt,
const uint8_t* restrict src,
const struct rfb_pixel_format* src_fmt,
size_t bytes_per_cpixel, size_t len)
{
if (src_fmt->bits_per_pixel == 32) {
pixel32_to_cpixel(dst, dst_fmt, (uint32_t*)src, src_fmt, bytes_per_cpixel, len);
return;
}
assert(src_fmt->true_colour_flag);
assert(src_fmt->depth <= 32);
assert(dst_fmt->true_colour_flag);
assert(dst_fmt->bits_per_pixel <= 32);
assert(dst_fmt->depth <= 32);
assert(bytes_per_cpixel <= 4 && bytes_per_cpixel >= 1);
uint32_t src_bpp = src_fmt->bits_per_pixel / 8;
uint32_t src_red_shift = src_fmt->red_shift;
uint32_t src_green_shift = src_fmt->green_shift;
uint32_t src_blue_shift = src_fmt->blue_shift;
uint32_t dst_red_shift = dst_fmt->red_shift;
uint32_t dst_green_shift = dst_fmt->green_shift;
uint32_t dst_blue_shift = dst_fmt->blue_shift;
uint32_t src_red_max = src_fmt->red_max;
uint32_t src_green_max = src_fmt->green_max;
uint32_t src_blue_max = src_fmt->blue_max;
uint32_t src_red_bits = POPCOUNT(src_fmt->red_max);
uint32_t src_green_bits = POPCOUNT(src_fmt->green_max);
uint32_t src_blue_bits = POPCOUNT(src_fmt->blue_max);
uint32_t dst_red_bits = POPCOUNT(dst_fmt->red_max);
uint32_t dst_green_bits = POPCOUNT(dst_fmt->green_max);
uint32_t dst_blue_bits = POPCOUNT(dst_fmt->blue_max);
uint32_t dst_endian_correction;
#define CONVERT_PIXELS(cpx, px) \
{ \
uint32_t r, g, b; \
r = ((px >> src_red_shift) & src_red_max) << dst_red_bits \
>> src_red_bits << dst_red_shift; \
g = ((px >> src_green_shift) & src_green_max) << dst_green_bits\
>> src_green_bits << dst_green_shift; \
b = ((px >> src_blue_shift) & src_blue_max) << dst_blue_bits \
>> src_blue_bits << dst_blue_shift; \
cpx = r | g | b; \
}
switch (bytes_per_cpixel) {
case 4:
if (dst_fmt->big_endian_flag) {
while (len--) {
uint32_t cpx, px = 0;
memcpy(&px, src, src_bpp);
src += src_bpp;
CONVERT_PIXELS(cpx, px)
*dst++ = (cpx >> 24) & 0xff;
*dst++ = (cpx >> 16) & 0xff;
*dst++ = (cpx >> 8) & 0xff;
*dst++ = (cpx >> 0) & 0xff;
}
} else {
while (len--) {
uint32_t cpx, px = 0;
memcpy(&px, src, src_bpp);
src += src_bpp;
CONVERT_PIXELS(cpx, px)
*dst++ = (cpx >> 0) & 0xff;
*dst++ = (cpx >> 8) & 0xff;
*dst++ = (cpx >> 16) & 0xff;
*dst++ = (cpx >> 24) & 0xff;
}
}
break;
case 3:
if (dst_fmt->bits_per_pixel == 32 && dst_fmt->depth <= 24) {
uint32_t min_dst_shift = dst_red_shift;
if (min_dst_shift > dst_green_shift)
min_dst_shift = dst_green_shift;
if (min_dst_shift > dst_blue_shift)
min_dst_shift = dst_blue_shift;
dst_red_shift -= min_dst_shift;
dst_green_shift -= min_dst_shift;
dst_blue_shift -= min_dst_shift;
}
dst_endian_correction = dst_fmt->big_endian_flag ? 16 : 0;
while (len--) {
uint32_t cpx, px = 0;
memcpy(&px, src, src_bpp);
src += src_bpp;
CONVERT_PIXELS(cpx, px)
*dst++ = (cpx >> (0 ^ dst_endian_correction)) & 0xff;
*dst++ = (cpx >> 8) & 0xff;
*dst++ = (cpx >> (16 ^ dst_endian_correction)) & 0xff;
}
break;
case 2:
dst_endian_correction = dst_fmt->big_endian_flag ? 8 : 0;
while (len--) {
uint32_t cpx, px = 0;
memcpy(&px, src, src_bpp);
src += src_bpp;
CONVERT_PIXELS(cpx, px)
*dst++ = (cpx >> (0 ^ dst_endian_correction)) & 0xff;
*dst++ = (cpx >> (8 ^ dst_endian_correction)) & 0xff;
}
break;
case 1:
while (len--) {
uint32_t cpx, px = 0;
memcpy(&px, src, src_bpp);
src += src_bpp;
CONVERT_PIXELS(cpx, px)
*dst++ = cpx & 0xff;
}
break;
default:
abort();
}
#undef CONVERT_PIXELS
}
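A minimal sketch of the conversion helper above, assuming rfb_pixfmt_from_fourcc also accepts DRM_FORMAT_RGB565 (which rfb_pixfmt_to_string recognizes):

struct rfb_pixel_format src_fmt, dst_fmt;
rfb_pixfmt_from_fourcc(&src_fmt, DRM_FORMAT_XRGB8888);
rfb_pixfmt_from_fourcc(&dst_fmt, DRM_FORMAT_RGB565);

uint32_t px = 0x00ff8040; // R = 0xff, G = 0x80, B = 0x40
uint8_t cpx[2];
pixel_to_cpixel(cpx, &dst_fmt, (const uint8_t*)&px, &src_fmt,
		sizeof(cpx), 1);
// cpx now holds the same colour packed as 5-6-5 bits, in the byte order
// selected by dst_fmt.big_endian_flag.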
/* clang-format off */
int rfb_pixfmt_from_fourcc(struct rfb_pixel_format *dst, uint32_t src) {
switch (src & ~DRM_FORMAT_BIG_ENDIAN) {
@ -357,22 +215,6 @@ bpp_32:
dst->green_max = 0xff;
dst->blue_max = 0xff;
break;
case DRM_FORMAT_BGR888:
dst->red_shift = 0;
dst->green_shift = 8;
dst->blue_shift = 16;
goto bpp_24;
case DRM_FORMAT_RGB888:
dst->red_shift = 16;
dst->green_shift = 8;
dst->blue_shift = 0;
bpp_24:
dst->bits_per_pixel = 24;
dst->depth = 24;
dst->red_max = 0xff;
dst->green_max = 0xff;
dst->blue_max = 0xff;
break;
case DRM_FORMAT_RGBA4444:
case DRM_FORMAT_RGBX4444:
dst->red_shift = 12;
@ -433,9 +275,6 @@ int pixel_size_from_fourcc(uint32_t fourcc)
case DRM_FORMAT_ABGR8888:
case DRM_FORMAT_XBGR8888:
return 4;
case DRM_FORMAT_BGR888:
case DRM_FORMAT_RGB888:
return 3;
case DRM_FORMAT_RGBA4444:
case DRM_FORMAT_RGBX4444:
case DRM_FORMAT_BGRA4444:
@ -618,8 +457,6 @@ const char* drm_format_to_string(uint32_t fmt)
X(XRGB8888) \
X(ABGR8888) \
X(XBGR8888) \
X(RGB888) \
X(BGR888) \
X(RGBA4444) \
X(RGBX4444) \
X(BGRA4444) \
@ -637,6 +474,9 @@ const char* drm_format_to_string(uint32_t fmt)
// Not exact, but close enough for debugging
const char* rfb_pixfmt_to_string(const struct rfb_pixel_format* fmt)
{
if (!(fmt->red_max == fmt->green_max && fmt->red_max == fmt->blue_max))
goto failure;
uint32_t profile = (fmt->red_shift << 16) | (fmt->green_shift << 8)
| (fmt->blue_shift);
@ -655,25 +495,9 @@ const char* rfb_pixfmt_to_string(const struct rfb_pixel_format* fmt)
CASE(8, 4, 0): return "XRGB4444";
CASE(0, 4, 8): return "XBGR4444";
CASE(11, 5, 0): return "RGB565";
CASE(5, 2, 0): return "RGB332";
CASE(0, 2, 5): return "RGB332";
CASE(4, 2, 0): return "RGB222";
CASE(0, 2, 4): return "BGR222";
#undef CASE
}
failure:
return "UNKNOWN";
}
void make_rgb332_pal8_map(struct rfb_set_colour_map_entries_msg* msg)
{
msg->type = RFB_SERVER_TO_CLIENT_SET_COLOUR_MAP_ENTRIES;
msg->padding = 0;
msg->first_colour = htons(0);
msg->n_colours = htons(256);
for (unsigned int i = 0; i < 256; ++i) {
msg->colours[i].r = htons(round(65535.0 / 7.0 * ((i >> 5) & 7)));
msg->colours[i].g = htons(round(65535.0 / 7.0 * ((i >> 2) & 7)));
msg->colours[i].b = htons(round(65535.0 / 3.0 * (i & 3)));
}
}
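As a worked example of the mapping above, palette index 0xE3 (binary 111 000 11) expands to r = round(65535/7 * 7) = 65535, g = round(65535/7 * 0) = 0 and b = round(65535/3 * 3) = 65535, i.e. full-intensity magenta.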

View File

@ -69,10 +69,7 @@ static int raw_encode_box(struct raw_encoder_work* ctx, struct vec* dst,
if (rc < 0)
return -1;
uint8_t* b = fb->addr;
int32_t src_bpp = src_fmt->bits_per_pixel / 8;
int32_t xoff = x_start * src_bpp;
int32_t src_stride = fb->stride * src_bpp;
uint32_t* b = fb->addr;
int bpp = dst_fmt->bits_per_pixel / 8;
@ -83,8 +80,8 @@ static int raw_encode_box(struct raw_encoder_work* ctx, struct vec* dst,
uint8_t* d = dst->data;
for (int y = y_start; y < y_start + height; ++y) {
pixel_to_cpixel(d + dst->len, dst_fmt,
b + xoff + y * src_stride, src_fmt,
pixel32_to_cpixel(d + dst->len, dst_fmt,
b + x_start + y * stride, src_fmt,
bpp, width);
dst->len += width * bpp;
}
@ -134,7 +131,7 @@ static void raw_encoder_do_work(void* obj)
struct nvnc_fb* fb = ctx->fb;
assert(fb);
size_t bpp = ctx->output_format.bits_per_pixel / 8;
size_t bpp = nvnc_fb_get_pixel_size(fb);
size_t n_rects = pixman_region_n_rects(&ctx->damage);
if (n_rects > UINT16_MAX)
n_rects = 1;

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 2019 - 2024 Andri Yngvason
* Copyright (c) 2019 - 2022 Andri Yngvason
*
* Permission to use, copy, modify, and/or distribute this software for any
* purpose with or without fee is hereby granted, provided that the above
@ -75,19 +75,17 @@
#define EXPORT __attribute__((visibility("default")))
static int send_desktop_resize(struct nvnc_client* client, struct nvnc_fb* fb);
static bool send_ext_support_frame(struct nvnc_client* client);
static int send_qemu_key_ext_frame(struct nvnc_client* client);
static enum rfb_encodings choose_frame_encoding(struct nvnc_client* client,
const struct nvnc_fb*);
struct nvnc_fb*);
static void on_encode_frame_done(struct encoder*, struct rcbuf*, uint64_t pts);
static bool client_has_encoding(const struct nvnc_client* client,
enum rfb_encodings encoding);
static void process_fb_update_requests(struct nvnc_client* client);
static void sockaddr_to_string(char* dst, size_t sz,
const struct sockaddr* addr);
static const char* encoding_to_string(enum rfb_encodings encoding);
static bool client_send_led_state(struct nvnc_client* client);
#if defined(PROJECT_VERSION)
#if defined(GIT_VERSION)
EXPORT const char nvnc_version[] = GIT_VERSION;
#elif defined(PROJECT_VERSION)
EXPORT const char nvnc_version[] = PROJECT_VERSION;
#else
EXPORT const char nvnc_version[] = "UNKNOWN";
@ -143,8 +141,6 @@ static void client_close(struct nvnc_client* client)
client->encoder->on_done = NULL;
}
encoder_unref(client->encoder);
encoder_unref(client->zrle_encoder);
encoder_unref(client->tight_encoder);
pixman_region_fini(&client->damage);
free(client->cut_text.buffer);
free(client);
@ -163,7 +159,7 @@ static inline void client_ref(struct nvnc_client* client)
++client->ref;
}
static void do_deferred_client_close(void *obj)
static void deferred_client_close(void *obj)
{
client_unref(obj);
}
@ -173,14 +169,6 @@ static void stop_self(void* obj)
aml_stop(aml_get_default(), obj);
}
static void defer_client_close(struct nvnc_client* client)
{
struct aml_idle* idle = aml_idle_new(stop_self, client,
do_deferred_client_close);
aml_start(aml_get_default(), idle);
aml_unref(idle);
}
static void close_after_write(void* userdata, enum stream_req_status status)
{
struct nvnc_client* client = userdata;
@ -192,7 +180,10 @@ static void close_after_write(void* userdata, enum stream_req_status status)
* stays alive while the stream is processing its queue.
* TODO: Figure out some better resource management for clients
*/
defer_client_close(client);
struct aml_idle* idle = aml_idle_new(stop_self, client,
deferred_client_close);
aml_start(aml_get_default(), idle);
aml_unref(idle);
}
static int handle_unsupported_version(struct nvnc_client* client)
@ -236,9 +227,7 @@ static int on_version_message(struct nvnc_client* client)
(struct rfb_security_types_msg*)buf;
security->n = 0;
if (server->auth_flags & NVNC_AUTH_REQUIRE_AUTH) {
assert(server->auth_fn);
if (client->server->auth_fn) {
#ifdef ENABLE_TLS
if (server->tls_creds) {
security->types[security->n++] = RFB_SECURITY_TYPE_VENCRYPT;
@ -248,18 +237,13 @@ static int on_version_message(struct nvnc_client* client)
#ifdef HAVE_CRYPTO
security->types[security->n++] = RFB_SECURITY_TYPE_RSA_AES256;
security->types[security->n++] = RFB_SECURITY_TYPE_RSA_AES;
if (!(server->auth_flags & NVNC_AUTH_REQUIRE_ENCRYPTION)) {
security->types[security->n++] = RFB_SECURITY_TYPE_APPLE_DH;
}
#endif
} else {
security->n = 1;
security->types[0] = RFB_SECURITY_TYPE_NONE;
}
if (security->n == 0) {
nvnc_log(NVNC_LOG_PANIC, "Failed to satisfy requested security constraints");
security->n = 1;
security->types[0] = RFB_SECURITY_TYPE_NONE;
}
stream_write(client->net_stream, security, sizeof(*security) +
@ -270,20 +254,13 @@ static int on_version_message(struct nvnc_client* client)
}
static int security_handshake_failed(struct nvnc_client* client,
const char* username, const char* reason_string)
const char* reason_string)
{
if (username)
nvnc_log(NVNC_LOG_INFO, "Security handshake failed for \"%s\": %s",
username, reason_string);
else
nvnc_log(NVNC_LOG_INFO, "Security handshake: %s",
username, reason_string);
char buffer[256];
client->state = VNC_CLIENT_STATE_ERROR;
uint32_t* result = (uint32_t*)buffer;
uint8_t* result = (uint8_t*)buffer;
struct rfb_error_reason* reason =
(struct rfb_error_reason*)(buffer + sizeof(*result));
@ -299,15 +276,8 @@ static int security_handshake_failed(struct nvnc_client* client,
return 0;
}
static int security_handshake_ok(struct nvnc_client* client, const char* username)
static int security_handshake_ok(struct nvnc_client* client)
{
if (username) {
nvnc_log(NVNC_LOG_INFO, "User \"%s\" authenticated", username);
strncpy(client->username, username, sizeof(client->username));
client->username[sizeof(client->username) - 1] = '\0';
}
uint32_t result = htonl(RFB_SECURITY_HANDSHAKE_OK);
return stream_write(client->net_stream, &result, sizeof(result), NULL,
NULL);
@ -344,8 +314,7 @@ static int on_vencrypt_version_message(struct nvnc_client* client)
return 0;
if (msg->major != 0 || msg->minor != 2) {
security_handshake_failed(client, NULL,
"Unsupported VeNCrypt version");
security_handshake_failed(client, "Unsupported VeNCrypt version");
return sizeof(*msg);
}
@ -414,12 +383,16 @@ static int on_vencrypt_plain_auth_message(struct nvnc_client* client)
username[MIN(ulen, sizeof(username) - 1)] = '\0';
password[MIN(plen, sizeof(password) - 1)] = '\0';
strncpy(client->username, username, sizeof(client->username));
client->username[sizeof(client->username) - 1] = '\0';
if (server->auth_fn(username, password, server->auth_ud)) {
security_handshake_ok(client, username);
nvnc_log(NVNC_LOG_INFO, "User \"%s\" authenticated", username);
security_handshake_ok(client);
client->state = VNC_CLIENT_STATE_WAITING_FOR_INIT;
} else {
security_handshake_failed(client, username,
"Invalid username or password");
nvnc_log(NVNC_LOG_INFO, "User \"%s\" rejected", username);
security_handshake_failed(client, "Invalid username or password");
}
return sizeof(*msg) + ulen + plen;
@ -503,11 +476,13 @@ static int on_apple_dh_response(struct nvnc_client* client)
crypto_cipher_del(cipher);
if (server->auth_fn(username, password, server->auth_ud)) {
security_handshake_ok(client, username);
nvnc_log(NVNC_LOG_INFO, "User \"%s\" authenticated", username);
security_handshake_ok(client);
client->state = VNC_CLIENT_STATE_WAITING_FOR_INIT;
} else {
security_handshake_failed(client, username,
"Invalid username or password");
nvnc_log(NVNC_LOG_INFO, "User \"%s\" rejected", username);
security_handshake_failed(client, "Invalid username or password");
crypto_cipher_del(cipher);
}
return sizeof(*msg) + key_len;
@ -773,17 +748,17 @@ static int on_rsa_aes_credentials(struct nvnc_client* client)
char username[256];
char password[256];
memcpy(username, (const char*)(msg + 1), username_len);
username[username_len] = '\0';
memcpy(password, (const char*)(msg + 2 + username_len), password_len);
password[password_len] = '\0';
strlcpy(username, (const char*)(msg + 1), username_len + 1);
strlcpy(password, (const char*)(msg + 2 + username_len),
password_len + 1);
if (server->auth_fn(username, password, server->auth_ud)) {
security_handshake_ok(client, username);
nvnc_log(NVNC_LOG_INFO, "User \"%s\" authenticated", username);
security_handshake_ok(client);
client->state = VNC_CLIENT_STATE_WAITING_FOR_INIT;
} else {
security_handshake_failed(client, username,
"Invalid username or password");
nvnc_log(NVNC_LOG_INFO, "User \"%s\" rejected", username);
security_handshake_failed(client, "Invalid username or password");
}
return 2 + username_len + password_len;
@ -801,7 +776,7 @@ static int on_security_message(struct nvnc_client* client)
switch (type) {
case RFB_SECURITY_TYPE_NONE:
security_handshake_ok(client, NULL);
security_handshake_ok(client);
client->state = VNC_CLIENT_STATE_WAITING_FOR_INIT;
break;
#ifdef ENABLE_TLS
@ -831,8 +806,7 @@ static int on_security_message(struct nvnc_client* client)
break;
#endif
default:
security_handshake_failed(client, NULL,
"Unsupported security type");
security_handshake_failed(client, "Unsupported security type");
break;
}
@ -925,30 +899,6 @@ static int on_init_message(struct nvnc_client* client)
return sizeof(shared_flag);
}
static int cook_pixel_map(struct nvnc_client* client)
{
struct rfb_pixel_format* fmt = &client->pixfmt;
// We'll just pretend that this is rgb332
fmt->true_colour_flag = true;
fmt->big_endian_flag = false;
fmt->bits_per_pixel = 8;
fmt->depth = 8;
fmt->red_max = 7;
fmt->green_max = 7;
fmt->blue_max = 3;
fmt->red_shift = 5;
fmt->green_shift = 2;
fmt->blue_shift = 0;
uint8_t buf[sizeof(struct rfb_set_colour_map_entries_msg)
+ 256 * sizeof(struct rfb_colour_map_entry)];
struct rfb_set_colour_map_entries_msg* msg =
(struct rfb_set_colour_map_entries_msg*)buf;
make_rgb332_pal8_map(msg);
return stream_write(client->net_stream, buf, sizeof(buf), NULL, NULL);
}
static int on_client_set_pixel_format(struct nvnc_client* client)
{
if (client->buffer_len - client->buffer_index <
@ -959,49 +909,30 @@ static int on_client_set_pixel_format(struct nvnc_client* client)
(struct rfb_pixel_format*)(client->msg_buffer +
client->buffer_index + 4);
if (fmt->true_colour_flag) {
nvnc_log(NVNC_LOG_DEBUG, "Using color palette for client %p",
client);
if (!fmt->true_colour_flag) {
/* We don't really know what to do with color maps right now */
nvnc_client_close(client);
return 0;
}
fmt->red_max = ntohs(fmt->red_max);
fmt->green_max = ntohs(fmt->green_max);
fmt->blue_max = ntohs(fmt->blue_max);
memcpy(&client->pixfmt, fmt, sizeof(client->pixfmt));
} else {
nvnc_log(NVNC_LOG_DEBUG, "Using color palette for client %p",
client);
cook_pixel_map(client);
}
client->has_pixfmt = true;
client->formats_changed = true;
nvnc_log(NVNC_LOG_DEBUG, "Client %p chose pixel format: %s", client,
rfb_pixfmt_to_string(&client->pixfmt));
return 4 + sizeof(struct rfb_pixel_format);
}
static void encodings_to_string_list(char* dst, size_t len,
enum rfb_encodings* encodings, size_t n)
{
size_t off = 0;
if (n > 0)
off += snprintf(dst, len, "%s",
encoding_to_string(encodings[0]));
for (size_t i = 1; i < n; ++i)
off += snprintf(dst + off, len - off, ",%s",
encoding_to_string(encodings[i]));
}
static int on_client_set_encodings(struct nvnc_client* client)
{
struct rfb_client_set_encodings_msg* msg =
(struct rfb_client_set_encodings_msg*)(client->msg_buffer +
client->buffer_index);
size_t n_encodings = ntohs(msg->n_encodings);
size_t n_encodings = MIN(MAX_ENCODINGS, ntohs(msg->n_encodings));
size_t n = 0;
if (client->buffer_len - client->buffer_index <
@ -1010,7 +941,7 @@ static int on_client_set_encodings(struct nvnc_client* client)
client->quality = 10;
for (size_t i = 0; i < n_encodings && n < MAX_ENCODINGS; ++i) {
for (size_t i = 0; i < n_encodings; ++i) {
enum rfb_encodings encoding = htonl(msg->encodings[i]);
switch (encoding) {
@ -1026,18 +957,9 @@ static int on_client_set_encodings(struct nvnc_client* client)
case RFB_ENCODING_DESKTOPSIZE:
case RFB_ENCODING_EXTENDEDDESKTOPSIZE:
case RFB_ENCODING_QEMU_EXT_KEY_EVENT:
case RFB_ENCODING_QEMU_LED_STATE:
case RFB_ENCODING_VMWARE_LED_STATE:
#ifdef ENABLE_EXPERIMENTAL
case RFB_ENCODING_PTS:
case RFB_ENCODING_NTP:
#endif
client->encodings[n++] = encoding;
#ifndef ENABLE_EXPERIMENTAL
case RFB_ENCODING_PTS:
case RFB_ENCODING_NTP:
;
#endif
}
if (RFB_ENCODING_JPEG_LOWQ <= encoding &&
@ -1045,14 +967,7 @@ static int on_client_set_encodings(struct nvnc_client* client)
client->quality = encoding - RFB_ENCODING_JPEG_LOWQ;
}
char encoding_list[256] = {};
encodings_to_string_list(encoding_list, sizeof(encoding_list),
client->encodings, n);
nvnc_log(NVNC_LOG_DEBUG, "Client %p set encodings: %s", client,
encoding_list);
client->n_encodings = n;
client->formats_changed = true;
return sizeof(*msg) + 4 * n_encodings;
}
@ -1109,79 +1024,14 @@ static const char* encoding_to_string(enum rfb_encodings encoding)
{
switch (encoding) {
case RFB_ENCODING_RAW: return "raw";
case RFB_ENCODING_COPYRECT: return "copyrect";
case RFB_ENCODING_RRE: return "rre";
case RFB_ENCODING_HEXTILE: return "hextile";
case RFB_ENCODING_TIGHT: return "tight";
case RFB_ENCODING_TRLE: return "trle";
case RFB_ENCODING_ZRLE: return "zrle";
case RFB_ENCODING_OPEN_H264: return "open-h264";
case RFB_ENCODING_CURSOR: return "cursor";
case RFB_ENCODING_DESKTOPSIZE: return "desktop-size";
case RFB_ENCODING_EXTENDEDDESKTOPSIZE: return "extended-desktop-size";
case RFB_ENCODING_QEMU_EXT_KEY_EVENT: return "qemu-extended-key-event";
case RFB_ENCODING_QEMU_LED_STATE: return "qemu-led-state";
case RFB_ENCODING_VMWARE_LED_STATE: return "vmware-led-state";
case RFB_ENCODING_PTS: return "pts";
case RFB_ENCODING_NTP: return "ntp";
}
return "UNKNOWN";
}
static bool ensure_encoder(struct nvnc_client* client, const struct nvnc_fb *fb)
{
struct nvnc* server = client->server;
enum rfb_encodings encoding = choose_frame_encoding(client, fb);
if (client->encoder && encoding == encoder_get_type(client->encoder))
return true;
int width = server->display->buffer->width;
int height = server->display->buffer->height;
if (client->encoder) {
server->n_damage_clients -= !(client->encoder->impl->flags &
ENCODER_IMPL_FLAG_IGNORES_DAMAGE);
client->encoder->on_done = NULL;
}
encoder_unref(client->encoder);
/* Zlib streams need to be saved so we keep encoders around that
* use them.
*/
switch (encoding) {
case RFB_ENCODING_ZRLE:
if (!client->zrle_encoder) {
client->zrle_encoder =
encoder_new(encoding, width, height);
}
client->encoder = client->zrle_encoder;
encoder_ref(client->encoder);
break;
case RFB_ENCODING_TIGHT:
if (!client->tight_encoder) {
client->tight_encoder =
encoder_new(encoding, width, height);
}
client->encoder = client->tight_encoder;
encoder_ref(client->encoder);
break;
default:
client->encoder = encoder_new(encoding, width, height);
break;
}
if (!client->encoder) {
nvnc_log(NVNC_LOG_ERROR, "Failed to allocate new encoder");
return false;
}
server->n_damage_clients += !(client->encoder->impl->flags &
ENCODER_IMPL_FLAG_IGNORES_DAMAGE);
nvnc_log(NVNC_LOG_INFO, "Choosing %s encoding for client %p",
encoding_to_string(encoding), client);
return true;
return "UNKNOWN";
}
static void process_fb_update_requests(struct nvnc_client* client)
@ -1213,14 +1063,14 @@ static void process_fb_update_requests(struct nvnc_client* client)
return;
}
if (!client->is_ext_notified) {
client->is_ext_notified = true;
if (server->key_code_fn && !client->is_qemu_key_ext_notified
&& client_has_encoding(client, RFB_ENCODING_QEMU_EXT_KEY_EVENT)) {
send_qemu_key_ext_frame(client);
client->is_qemu_key_ext_notified = true;
if (send_ext_support_frame(client)) {
if (--client->n_pending_requests <= 0)
return;
}
}
if (server->cursor_seq != client->cursor_seq
&& client_has_encoding(client, RFB_ENCODING_CURSOR)) {
@ -1230,25 +1080,41 @@ static void process_fb_update_requests(struct nvnc_client* client)
return;
}
if (client_send_led_state(client)) {
if (--client->n_pending_requests <= 0)
return;
}
if (!pixman_region_not_empty(&client->damage))
return;
if (!ensure_encoder(client, fb))
return;
DTRACE_PROBE1(neatvnc, update_fb_start, client);
enum rfb_encodings encoding = choose_frame_encoding(client, fb);
if (!client->encoder || encoding != encoder_get_type(client->encoder)) {
int width = server->display->buffer->width;
int height = server->display->buffer->height;
if (client->encoder) {
server->n_damage_clients -=
!(client->encoder->impl->flags &
ENCODER_IMPL_FLAG_IGNORES_DAMAGE);
client->encoder->on_done = NULL;
}
encoder_unref(client->encoder);
client->encoder = encoder_new(encoding, width, height);
if (!client->encoder) {
nvnc_log(NVNC_LOG_ERROR, "Failed to allocate new encoder");
return;
}
server->n_damage_clients +=
!(client->encoder->impl->flags &
ENCODER_IMPL_FLAG_IGNORES_DAMAGE);
nvnc_log(NVNC_LOG_INFO, "Choosing %s encoding for client %p",
encoding_to_string(encoding), client);
}
/* The client's damage is exchanged for an empty one */
struct pixman_region16 damage = client->damage;
pixman_region_init(&client->damage);
client->is_updating = true;
client->formats_changed = false;
client->current_fb = fb;
nvnc_fb_hold(fb);
nvnc_fb_ref(fb);
@ -1270,7 +1136,6 @@ static void process_fb_update_requests(struct nvnc_client* client)
nvnc_log(NVNC_LOG_ERROR, "Failed to encode current frame");
client_unref(client);
client->is_updating = false;
client->formats_changed = false;
assert(client->current_fb);
nvnc_fb_release(client->current_fb);
nvnc_fb_unref(client->current_fb);
@ -1748,7 +1613,7 @@ static void on_client_event(struct stream* stream, enum stream_event event)
if (event == STREAM_EVENT_REMOTE_CLOSED) {
nvnc_log(NVNC_LOG_INFO, "Client %p (%d) hung up", client, client->ref);
defer_client_close(client);
nvnc_client_close(client);
return;
}
@ -1795,6 +1660,24 @@ static void on_client_event(struct stream* stream, enum stream_event event)
client->buffer_index = 0;
}
static void record_peer_hostname(int fd, struct nvnc_client* client)
{
struct sockaddr_storage storage;
struct sockaddr* peer = (struct sockaddr*)&storage;
socklen_t peerlen = sizeof(storage);
if (getpeername(fd, peer, &peerlen) == 0) {
if (peer->sa_family == AF_UNIX) {
snprintf(client->hostname, sizeof(client->hostname),
"unix domain socket");
} else {
getnameinfo(peer, peerlen,
client->hostname, sizeof(client->hostname),
NULL, 0, // no need for port
0);
}
}
}
static void on_connection(void* obj)
{
struct nvnc* server = aml_get_userdata(obj);
@ -1806,7 +1689,6 @@ static void on_connection(void* obj)
client->ref = 1;
client->server = server;
client->quality = 10; /* default to lossless */
client->led_state = -1; /* trigger sending of initial state */
int fd = accept(server->fd, NULL, 0);
if (fd < 0) {
@ -1817,6 +1699,8 @@ static void on_connection(void* obj)
int one = 1;
setsockopt(fd, IPPROTO_TCP, TCP_NODELAY, &one, sizeof(one));
record_peer_hostname(fd, client);
#ifdef ENABLE_WEBSOCKET
if (server->socket_type == NVNC__SOCKET_WEBSOCKET)
{
@ -1851,14 +1735,7 @@ static void on_connection(void* obj)
client->state = VNC_CLIENT_STATE_WAITING_FOR_VERSION;
char ip_address[256];
struct sockaddr_storage addr;
socklen_t addrlen = sizeof(addr);
nvnc_client_get_address(client, (struct sockaddr*)&addr, &addrlen);
sockaddr_to_string(ip_address, sizeof(ip_address),
(struct sockaddr*)&addr);
nvnc_log(NVNC_LOG_INFO, "New client connection from %s: %p (ref %d)",
ip_address, client, client->ref);
nvnc_log(NVNC_LOG_INFO, "New client connection from %s: %p (ref %d)", client->hostname, client, client->ref);
return;
@ -1884,11 +1761,6 @@ static void sockaddr_to_string(char* dst, size_t sz, const struct sockaddr* addr
case AF_INET6:
inet_ntop(addr->sa_family, &sa_in6->sin6_addr, dst, sz);
break;
default:
nvnc_log(NVNC_LOG_DEBUG,
"Don't know how to convert sa_family %d to string",
addr->sa_family);
break;
}
}
@ -1975,7 +1847,7 @@ static int bind_address_unix(const char* name)
}
static int bind_address(const char* name, uint16_t port,
int fd, enum nvnc__socket_type type)
enum nvnc__socket_type type)
{
switch (type) {
case NVNC__SOCKET_TCP:
@ -1983,9 +1855,6 @@ static int bind_address(const char* name, uint16_t port,
return bind_address_tcp(name, port);
case NVNC__SOCKET_UNIX:
return bind_address_unix(name);
case NVNC__SOCKET_FROM_FD:
// nothing to bind
return fd;
}
nvnc_log(NVNC_LOG_PANIC, "Unknown socket address type");
@ -1993,7 +1862,7 @@ static int bind_address(const char* name, uint16_t port,
}
static struct nvnc* open_common(const char* address, uint16_t port,
int fd, enum nvnc__socket_type type)
enum nvnc__socket_type type)
{
nvnc__log_init();
@ -2009,7 +1878,7 @@ static struct nvnc* open_common(const char* address, uint16_t port,
LIST_INIT(&self->clients);
self->fd = bind_address(address, port, fd, type);
self->fd = bind_address(address, port, type);
if (self->fd < 0)
goto bind_failure;
@ -2042,14 +1911,14 @@ bind_failure:
EXPORT
struct nvnc* nvnc_open(const char* address, uint16_t port)
{
return open_common(address, port, -1, NVNC__SOCKET_TCP);
return open_common(address, port, NVNC__SOCKET_TCP);
}
EXPORT
struct nvnc* nvnc_open_websocket(const char *address, uint16_t port)
{
#ifdef ENABLE_WEBSOCKET
return open_common(address, port, -1, NVNC__SOCKET_WEBSOCKET);
return open_common(address, port, NVNC__SOCKET_WEBSOCKET);
#else
return NULL;
#endif
@ -2058,13 +1927,7 @@ struct nvnc* nvnc_open_websocket(const char *address, uint16_t port)
EXPORT
struct nvnc* nvnc_open_unix(const char* address)
{
return open_common(address, 0, -1, NVNC__SOCKET_UNIX);
}
EXPORT
struct nvnc* nvnc_open_from_fd(int fd)
{
return open_common(NULL, 0, fd, NVNC__SOCKET_FROM_FD);
return open_common(address, 0, NVNC__SOCKET_UNIX);
}
static void unlink_fd_path(int fd)
@ -2102,11 +1965,6 @@ void nvnc_close(struct nvnc* self)
unlink_fd_path(self->fd);
close(self->fd);
#ifdef HAVE_CRYPTO
crypto_rsa_priv_key_del(self->rsa_priv);
crypto_rsa_pub_key_del(self->rsa_pub);
#endif
#ifdef ENABLE_TLS
if (self->tls_creds) {
gnutls_certificate_free_credentials(self->tls_creds);
@ -2120,8 +1978,6 @@ void nvnc_close(struct nvnc* self)
static void complete_fb_update(struct nvnc_client* client)
{
if (!client->is_updating)
return;
client->is_updating = false;
assert(client->current_fb);
nvnc_fb_release(client->current_fb);
@ -2129,7 +1985,7 @@ static void complete_fb_update(struct nvnc_client* client)
client->current_fb = NULL;
process_fb_update_requests(client);
client_unref(client);
DTRACE_PROBE1(neatvnc, update_fb_done, client);
DTRACE_PROBE2(neatvnc, update_fb_done, client, pts);
}
static void on_write_frame_done(void* userdata, enum stream_req_status status)
@ -2139,7 +1995,7 @@ static void on_write_frame_done(void* userdata, enum stream_req_status status)
}
static enum rfb_encodings choose_frame_encoding(struct nvnc_client* client,
const struct nvnc_fb* fb)
struct nvnc_fb* fb)
{
for (size_t i = 0; i < client->n_encodings; ++i)
switch (client->encodings[i]) {
@ -2179,17 +2035,6 @@ static void finish_fb_update(struct nvnc_client* client, struct rcbuf* payload,
if (client->net_stream->state == STREAM_STATE_CLOSED)
goto complete;
if (client->formats_changed) {
/* Client has requested new pixel format or encoding in the
* meantime, so it probably won't know what to do with this
* frame. Pending requests get incremented because this one is
* dropped.
*/
nvnc_log(NVNC_LOG_DEBUG, "Client changed pixel format or encoding with in-flight buffer");
client->n_pending_requests++;
goto complete;
}
DTRACE_PROBE2(neatvnc, send_fb_start, client, pts);
n_rects += will_send_pts(client, pts) ? 1 : 0;
struct rfb_server_fb_update_msg update_msg = {
@ -2266,36 +2111,20 @@ static int send_desktop_resize(struct nvnc_client* client, struct nvnc_fb* fb)
return 0;
}
static bool send_ext_support_frame(struct nvnc_client* client)
static int send_qemu_key_ext_frame(struct nvnc_client* client)
{
int has_qemu_ext =
client_has_encoding(client, RFB_ENCODING_QEMU_EXT_KEY_EVENT);
int has_ntp = client_has_encoding(client, RFB_ENCODING_NTP);
int n_rects = has_qemu_ext + has_ntp;
if (n_rects == 0)
return false;
struct rfb_server_fb_update_msg head = {
.type = RFB_SERVER_TO_CLIENT_FRAMEBUFFER_UPDATE,
.n_rects = htons(n_rects),
.n_rects = htons(1),
};
stream_write(client->net_stream, &head, sizeof(head), NULL, NULL);
if (has_qemu_ext) {
struct rfb_server_fb_rect rect = {
.encoding = htonl(RFB_ENCODING_QEMU_EXT_KEY_EVENT),
};
stream_write(client->net_stream, &rect, sizeof(rect), NULL, NULL);
}
if (has_ntp) {
struct rfb_server_fb_rect rect = {
.encoding = htonl(RFB_ENCODING_NTP),
};
stream_write(client->net_stream, &head, sizeof(head), NULL, NULL);
stream_write(client->net_stream, &rect, sizeof(rect), NULL, NULL);
}
return true;
return 0;
}
void nvnc__damage_region(struct nvnc* self, const struct pixman_region16* damage)
@ -2403,9 +2232,10 @@ struct nvnc* nvnc_client_get_server(const struct nvnc_client* client)
}
EXPORT
int nvnc_client_get_address(const struct nvnc_client* client,
struct sockaddr* restrict addr, socklen_t* restrict addrlen) {
return getpeername(client->net_stream->fd, addr, addrlen);
const char* nvnc_client_get_hostname(const struct nvnc_client* client) {
if (client->hostname[0] == '\0')
return NULL;
return client->hostname;
}
EXPORT
@ -2445,60 +2275,6 @@ bool nvnc_client_supports_cursor(const struct nvnc_client* client)
return false;
}
static bool client_send_led_state(struct nvnc_client* client)
{
if (client->pending_led_state == client->led_state)
return false;
bool have_qemu_led_state =
client_has_encoding(client, RFB_ENCODING_QEMU_LED_STATE);
bool have_vmware_led_state =
client_has_encoding(client, RFB_ENCODING_VMWARE_LED_STATE);
if (!have_qemu_led_state && !have_vmware_led_state)
return false;
nvnc_log(NVNC_LOG_DEBUG, "Keyboard LED state changed: %x -> %x",
client->led_state, client->pending_led_state);
struct vec payload;
vec_init(&payload, 4096);
struct rfb_server_fb_update_msg head = {
.type = RFB_SERVER_TO_CLIENT_FRAMEBUFFER_UPDATE,
.n_rects = htons(1),
};
struct rfb_server_fb_rect rect = {
.encoding = htonl(RFB_ENCODING_QEMU_LED_STATE),
};
vec_append(&payload, &head, sizeof(head));
vec_append(&payload, &rect, sizeof(rect));
if (have_qemu_led_state) {
uint8_t data = client->pending_led_state;
vec_append(&payload, &data, sizeof(data));
} else if (have_vmware_led_state) {
uint32_t data = htonl(client->pending_led_state);
vec_append(&payload, &data, sizeof(data));
}
stream_send(client->net_stream, rcbuf_new(payload.data, payload.len),
NULL, NULL);
client->led_state = client->pending_led_state;
return true;
}
EXPORT
void nvnc_client_set_led_state(struct nvnc_client* client,
enum nvnc_keyboard_led_state state)
{
client->pending_led_state = state;
process_fb_update_requests(client);
}
EXPORT
void nvnc_set_name(struct nvnc* self, const char* name)
{
@ -2517,8 +2293,9 @@ bool nvnc_has_auth(void)
}
EXPORT
int nvnc_set_tls_creds(struct nvnc* self, const char* privkey_path,
const char* cert_path)
int nvnc_enable_auth(struct nvnc* self, const char* privkey_path,
const char* cert_path, nvnc_auth_fn auth_fn,
void* userdata)
{
#ifdef ENABLE_TLS
if (self->tls_creds)
@ -2549,6 +2326,9 @@ int nvnc_set_tls_creds(struct nvnc* self, const char* privkey_path,
goto cert_set_failure;
}
self->auth_fn = auth_fn;
self->auth_ud = userdata;
return 0;
cert_set_failure:
@ -2561,13 +2341,10 @@ cert_alloc_failure:
}
EXPORT
int nvnc_enable_auth(struct nvnc* self, enum nvnc_auth_flags flags,
nvnc_auth_fn auth_fn, void* userdata)
int nvnc_enable_auth2(struct nvnc* self, nvnc_auth_fn auth_fn, void* userdata)
{
#ifdef HAVE_CRYPTO
self->auth_flags = flags;
self->auth_fn = auth_fn;
self->auth_ud = userdata;
return 0;
#endif
return -1;
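A minimal sketch of wiring up authentication with the flags-based variant shown in this hunk; "server" is the struct nvnc* handle, the paths are placeholders, and nvnc_auth_fn is assumed to be a bool callback taking username, password and userdata, as in the auth_fn call sites above:

static bool check_credentials(const char* username, const char* password,
		void* userdata)
{
	(void)userdata;
	// Placeholder check only; a real server would consult its user store.
	return strcmp(username, "user") == 0 && strcmp(password, "secret") == 0;
}

// TLS (VeNCrypt) and RSA-AES become available once credentials and the
// auth callback are both set:
nvnc_set_tls_creds(server, "/path/to/key.pem", "/path/to/cert.pem");
nvnc_enable_auth(server,
		NVNC_AUTH_REQUIRE_AUTH | NVNC_AUTH_REQUIRE_ENCRYPTION,
		check_credentials, NULL);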

View File

@ -170,16 +170,13 @@ static int stream_rsa_aes_send(struct stream* base, struct rcbuf* payload,
vec_append(&buf, mac, sizeof(mac));
}
size_t payload_size = payload->size;
rcbuf_unref(payload);
int r = stream_tcp_send(base, rcbuf_new(buf.data, buf.len), on_done,
userdata);
if (r < 0) {
return r;
}
return payload_size;
return payload->size;
}
static struct stream_impl impl = {

View File

@ -302,14 +302,13 @@ static void tight_encode_tile_basic(struct tight_encoder* self,
else
memcpy(&cfmt, &self->dfmt, sizeof(cfmt));
uint8_t* addr = nvnc_fb_get_addr(self->fb);
int32_t bpp = self->sfmt.bits_per_pixel / 8;
int32_t byte_stride = nvnc_fb_get_stride(self->fb) * bpp;
int32_t xoff = x * bpp;
uint32_t* addr = nvnc_fb_get_addr(self->fb);
int32_t stride = nvnc_fb_get_stride(self->fb);
// TODO: Limit width and height to the sides
for (uint32_t y = y_start; y < y_start + height; ++y) {
uint8_t* img = addr + xoff + y * byte_stride;
pixel_to_cpixel(row, &cfmt, img, &self->sfmt,
void* img = addr + x + y * stride;
pixel32_to_cpixel(row, &cfmt, img, &self->sfmt,
bytes_per_cpixel, width);
// TODO What to do if the buffer fills up?
@ -336,10 +335,6 @@ static enum TJPF tight_get_jpeg_pixfmt(uint32_t fourcc)
case DRM_FORMAT_ABGR8888:
case DRM_FORMAT_XBGR8888:
return TJPF_RGBX;
case DRM_FORMAT_BGR888:
return TJPF_RGB;
case DRM_FORMAT_RGB888:
return TJPF_BGR;
}
return TJPF_UNKNOWN;
@ -365,16 +360,14 @@ static int tight_encode_tile_jpeg(struct tight_encoder* self,
if (!handle)
return -1;
uint8_t* addr = nvnc_fb_get_addr(self->fb);
int32_t bpp = self->sfmt.bits_per_pixel / 8;
int32_t byte_stride = nvnc_fb_get_stride(self->fb) * bpp;
int32_t xoff = x * bpp;
uint8_t* img = addr + xoff + y * byte_stride;
uint32_t* addr = nvnc_fb_get_addr(self->fb);
int32_t stride = nvnc_fb_get_stride(self->fb);
void* img = (uint32_t*)addr + x + y * stride;
enum TJSAMP subsampling = (quality == 9) ? TJSAMP_444 : TJSAMP_420;
int rc = -1;
rc = tjCompress2(handle, img, width, byte_stride, height, tjfmt, &buffer,
rc = tjCompress2(handle, img, width, stride * 4, height, tjfmt, &buffer,
&size, subsampling, quality, TJFLAG_FASTDCT);
if (rc < 0) {
nvnc_log(NVNC_LOG_ERROR, "Failed to encode tight JPEG box: %s",

View File

@ -1,19 +1,3 @@
/*
* Copyright (c) 2023 Andri Yngvason
*
* Permission to use, copy, modify, and/or distribute this software for any
* purpose with or without fee is hereby granted, provided that the above
* copyright notice and this permission notice appear in all copies.
*
* THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES WITH
* REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY
* AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT,
* INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
* LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE
* OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
* PERFORMANCE OF THIS SOFTWARE.
*/
#include "websocket.h"
#include <stdint.h>

View File

@ -1,19 +1,3 @@
/*
* Copyright (c) 2023 Andri Yngvason
*
* Permission to use, copy, modify, and/or distribute this software for any
* purpose with or without fee is hereby granted, provided that the above
* copyright notice and this permission notice appear in all copies.
*
* THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES WITH
* REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY
* AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT,
* INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
* LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE
* OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
* PERFORMANCE OF THIS SOFTWARE.
*/
#include "websocket.h"
#include "http.h"
#include "crypto.h"

View File

@ -61,32 +61,32 @@ static inline struct zrle_encoder* zrle_encoder(struct encoder* encoder)
return (struct zrle_encoder*)encoder;
}
static inline int find_colour_in_palette(uint8_t* palette, int len,
const uint8_t* colour, int bpp)
static inline int find_colour_in_palette(uint32_t* palette, int len,
uint32_t colour)
{
for (int i = 0; i < len; ++i)
if (memcmp(palette + i * bpp, colour, bpp) == 0)
if (palette[i] == colour)
return i;
return -1;
}
static int zrle_get_tile_palette(uint8_t* palette, const uint8_t* src,
const int src_bpp, size_t length)
static int zrle_get_tile_palette(uint32_t* palette, const uint32_t* src,
size_t length)
{
int n = 0;
/* TODO: Maybe ignore the alpha channel */
memcpy(palette + (n++ * src_bpp), src, src_bpp);
palette[n++] = src[0];
for (size_t i = 0; i < length; ++i) {
const uint8_t* colour_addr = src + i * src_bpp;
uint32_t colour = src[i];
if (find_colour_in_palette(palette, n, colour_addr, src_bpp) < 0) {
if (find_colour_in_palette(palette, n, colour) < 0) {
if (n >= 16)
return -1;
memcpy(palette + (n++ * src_bpp), colour_addr, src_bpp);
palette[n++] = colour;
}
}
@ -95,14 +95,14 @@ static int zrle_get_tile_palette(uint8_t* palette, const uint8_t* src,
static void zrle_encode_unichrome_tile(struct vec* dst,
const struct rfb_pixel_format* dst_fmt,
uint8_t* colour,
uint32_t colour,
const struct rfb_pixel_format* src_fmt)
{
int bytes_per_cpixel = calc_bytes_per_cpixel(dst_fmt);
vec_fast_append_8(dst, 1);
pixel_to_cpixel(((uint8_t*)dst->data) + 1, dst_fmt, colour, src_fmt,
pixel32_to_cpixel(((uint8_t*)dst->data) + 1, dst_fmt, &colour, src_fmt,
bytes_per_cpixel, 1);
dst->len += bytes_per_cpixel;
@ -127,16 +127,15 @@ static void encode_run_length(struct vec* dst, uint8_t index, int run_length)
static void zrle_encode_packed_tile(struct vec* dst,
const struct rfb_pixel_format* dst_fmt,
const uint8_t* src,
const uint32_t* src,
const struct rfb_pixel_format* src_fmt,
size_t length, uint8_t* palette,
size_t length, uint32_t* palette,
int palette_size)
{
int bytes_per_cpixel = calc_bytes_per_cpixel(dst_fmt);
int src_bpp = src_fmt->bits_per_pixel / 8;
uint8_t cpalette[16 * 3];
pixel_to_cpixel(cpalette, dst_fmt, palette, src_fmt,
pixel32_to_cpixel((uint8_t*)cpalette, dst_fmt, palette, src_fmt,
bytes_per_cpixel, palette_size);
vec_fast_append_8(dst, 128 | palette_size);
@ -147,46 +146,45 @@ static void zrle_encode_packed_tile(struct vec* dst,
int run_length = 1;
for (size_t i = 1; i < length; ++i) {
if (memcmp(src + i * src_bpp, src + (i - 1) * src_bpp, src_bpp) == 0) {
if (src[i] == src[i - 1]) {
run_length++;
continue;
}
index = find_colour_in_palette(palette, palette_size, src + (i - 1) * src_bpp, src_bpp);
index = find_colour_in_palette(palette, palette_size, src[i - 1]);
encode_run_length(dst, index, run_length);
run_length = 1;
}
if (run_length > 0) {
index = find_colour_in_palette(palette, palette_size,
src + (length - 1) * src_bpp, src_bpp);
src[length - 1]);
encode_run_length(dst, index, run_length);
}
}
static void zrle_copy_tile(uint8_t* tile, const uint8_t* src, int src_bpp,
int stride, int width, int height)
static void zrle_copy_tile(uint32_t* dst, const uint32_t* src, int stride,
int width, int height)
{
int byte_stride = stride * src_bpp;
for (int y = 0; y < height; ++y)
memcpy(tile + y * width * src_bpp, src + y * byte_stride, width * src_bpp);
memcpy(dst + y * width, src + y * stride, width * 4);
}
static void zrle_encode_tile(struct vec* dst,
const struct rfb_pixel_format* dst_fmt,
const uint8_t* src,
const uint32_t* src,
const struct rfb_pixel_format* src_fmt,
size_t length)
{
int bytes_per_cpixel = calc_bytes_per_cpixel(dst_fmt);
int src_bpp = src_fmt->bits_per_pixel / 8;
vec_clear(dst);
uint8_t palette[16 * 4];
int palette_size = zrle_get_tile_palette(palette, src, src_bpp, length);
uint32_t palette[16];
int palette_size = zrle_get_tile_palette(palette, src, length);
if (palette_size == 1) {
zrle_encode_unichrome_tile(dst, dst_fmt, &palette[0], src_fmt);
zrle_encode_unichrome_tile(dst, dst_fmt, palette[0], src_fmt);
return;
}
@ -198,7 +196,7 @@ static void zrle_encode_tile(struct vec* dst,
vec_fast_append_8(dst, 0);
pixel_to_cpixel(((uint8_t*)dst->data) + 1, dst_fmt, (uint8_t*)src, src_fmt,
pixel32_to_cpixel(((uint8_t*)dst->data) + 1, dst_fmt, src, src_fmt,
bytes_per_cpixel, length);
dst->len += bytes_per_cpixel * length;
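
Taken together, the tile paths in this file write the ZRLE subencoding byte defined in RFC 6143, section 7.7.6: 0 for a raw tile of cpixels, 1 for a solid tile followed by one cpixel, 2 to 16 for a packed palette, and 128 plus the palette size for the palette-RLE form used by zrle_encode_packed_tile. The enumerator names below are illustrative; only the values come from the spec.

/* ZRLE tile subencoding byte, per RFC 6143, section 7.7.6. */
enum zrle_subencoding {
	ZRLE_SUB_RAW = 0,            /* width * height cpixels follow */
	ZRLE_SUB_SOLID = 1,          /* a single cpixel follows */
	ZRLE_SUB_PACKED_PALETTE = 2, /* 2..16: palette, then bit-packed indices */
	ZRLE_SUB_PLAIN_RLE = 128,    /* runs of raw cpixels */
	ZRLE_SUB_PALETTE_RLE = 130,  /* 130..255: palette of (value - 128) entries,
	                                then index/run-length pairs */
};
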
@@ -237,13 +235,12 @@ static int zrle_encode_box(struct zrle_encoder* self, struct vec* out,
{
int r = -1;
int bytes_per_cpixel = calc_bytes_per_cpixel(dst_fmt);
int src_bpp = src_fmt->bits_per_pixel / 8;
struct vec in;
uint16_t x_pos = self->encoder.x_pos;
uint16_t y_pos = self->encoder.y_pos;
uint8_t* tile = malloc(TILE_LENGTH * TILE_LENGTH * 4);
uint32_t* tile = malloc(TILE_LENGTH * TILE_LENGTH * 4);
if (!tile)
goto failure;
@@ -271,11 +268,10 @@ static int zrle_encode_box(struct zrle_encoder* self, struct vec* out,
? TILE_LENGTH
: height - tile_y;
int y_off = (y + tile_y) * stride * src_bpp;
int x_off = (x + tile_x) * src_bpp;
int y_off = y + tile_y;
zrle_copy_tile(tile,
((uint8_t*)fb->addr) + x_off + y_off, src_bpp,
((uint32_t*)fb->addr) + x + tile_x + y_off * stride,
stride, tile_width, tile_height);
zrle_encode_tile(&in, dst_fmt, tile, src_fmt,

View File

@@ -7,7 +7,6 @@ pixels = executable('pixels',
dependencies: [
pixman,
libdrm_inc,
libm,
],
)
test('pixels', pixels)

View File

@@ -22,11 +22,10 @@
#define UDIV_UP(a, b) (((a) + (b) - 1) / (b))
#define ARRAY_LEN(a) (sizeof(a) / (sizeof(a[0])))
static bool test_pixel_to_cpixel_4bpp(void)
static bool test_pixel32_to_cpixel_4bpp(void)
{
uint32_t src = u32_le(0x11223344u);
uint32_t dst;
uint8_t* src_addr = (uint8_t*)&src;
struct rfb_pixel_format dstfmt = { 0 }, srcfmt = { 0 };
@@ -34,63 +33,25 @@ static bool test_pixel_to_cpixel_4bpp(void)
dst = 0;
rfb_pixfmt_from_fourcc(&srcfmt, DRM_FORMAT_RGBA8888);
pixel_to_cpixel((uint8_t*)&dst, &dstfmt, src_addr, &srcfmt, 4, 1);
pixel32_to_cpixel((uint8_t*)&dst, &dstfmt, &src, &srcfmt, 4, 1);
if ((src & 0xffffff00u) != (dst & 0xffffff00u))
return false;
dst = 0;
rfb_pixfmt_from_fourcc(&dstfmt, DRM_FORMAT_ABGR8888);
pixel_to_cpixel((uint8_t*)&dst, &dstfmt, src_addr, &srcfmt, 4, 1);
pixel32_to_cpixel((uint8_t*)&dst, &dstfmt, &src, &srcfmt, 4, 1);
if (dst != u32_le(0x00332211u))
return false;
dst = 0;
rfb_pixfmt_from_fourcc(&dstfmt, DRM_FORMAT_ARGB8888);
pixel_to_cpixel((uint8_t*)&dst, &dstfmt, src_addr, &srcfmt, 4, 1);
pixel32_to_cpixel((uint8_t*)&dst, &dstfmt, &src, &srcfmt, 4, 1);
if (dst != u32_le(0x00112233u))
return false;
dst = 0;
rfb_pixfmt_from_fourcc(&dstfmt, DRM_FORMAT_BGRA8888);
pixel_to_cpixel((uint8_t*)&dst, &dstfmt, src_addr, &srcfmt, 4, 1);
if (dst != u32_le(0x33221100u))
return false;
return true;
}
static bool test_pixel_to_cpixel_3bpp(void)
{
//44 is extra data that should not be copied anywhere below.
uint32_t src = u32_le(0x44112233u);
uint32_t dst;
uint8_t* src_addr = (uint8_t*)&src;
struct rfb_pixel_format dstfmt = { 0 }, srcfmt = { 0 };
rfb_pixfmt_from_fourcc(&srcfmt, DRM_FORMAT_RGB888);
dst = 0;
rfb_pixfmt_from_fourcc(&dstfmt, DRM_FORMAT_RGBA8888);
pixel_to_cpixel((uint8_t*)&dst, &dstfmt, src_addr, &srcfmt, 4, 1);
if (dst != u32_le(0x11223300u))
return false;
dst = 0;
rfb_pixfmt_from_fourcc(&dstfmt, DRM_FORMAT_ABGR8888);
pixel_to_cpixel((uint8_t*)&dst, &dstfmt, src_addr, &srcfmt, 4, 1);
if (dst != u32_le(0x00332211u))
return false;
dst = 0;
rfb_pixfmt_from_fourcc(&dstfmt, DRM_FORMAT_ARGB8888);
pixel_to_cpixel((uint8_t*)&dst, &dstfmt, src_addr, &srcfmt, 4, 1);
if (dst != u32_le(0x00112233u))
return false;
dst = 0;
rfb_pixfmt_from_fourcc(&dstfmt, DRM_FORMAT_BGRA8888);
pixel_to_cpixel((uint8_t*)&dst, &dstfmt, src_addr, &srcfmt, 4, 1);
pixel32_to_cpixel((uint8_t*)&dst, &dstfmt, &src, &srcfmt, 4, 1);
if (dst != u32_le(0x33221100u))
return false;
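
The expected values in these tests are wrapped in u32_le so the literals compare correctly regardless of host byte order; the helper itself is not part of this diff. A plausible sketch, assumed rather than taken from the tree, would swap the bytes only on big-endian hosts so that u32_le(0x11223344u) always occupies memory as 0x44 0x33 0x22 0x11:

#include <stdint.h>

/* Assumed helper: yields a value whose in-memory layout matches the
 * little-endian encoding of the literal, whatever the host order is. */
static inline uint32_t u32_le(uint32_t x)
{
#if defined(__BYTE_ORDER__) && __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
	return __builtin_bswap32(x);
#else
	return x;
#endif
}
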
@@ -212,8 +173,7 @@ static bool test_rfb_pixfmt_to_string(void)
int main()
{
bool ok = test_pixel_to_cpixel_4bpp() &&
test_pixel_to_cpixel_3bpp() &&
bool ok = test_pixel32_to_cpixel_4bpp() &&
test_fourcc_to_pixman_fmt() &&
test_extract_alpha_mask_rgba8888() &&
test_drm_format_to_string() &&