neatvnc/src/zrle.c

/*
 * Copyright (c) 2019 Andri Yngvason
 *
 * Permission to use, copy, modify, and/or distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES WITH
 * REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY
 * AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT,
 * INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
 * LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE
 * OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
 * PERFORMANCE OF THIS SOFTWARE.
 */
#include "rfb-proto.h"
2019-08-25 16:40:59 +00:00
#include "util.h"
#include "vec.h"
2019-08-27 21:49:28 +00:00
#include "zrle.h"
2019-08-31 15:50:23 +00:00
#include "miniz.h"
#include "neatvnc.h"
#include <stdint.h>
#include <unistd.h>
#include <stdlib.h>
#include <endian.h>
2019-09-07 18:42:55 +00:00
#include <stdbool.h>
#include <assert.h>
2019-08-25 16:40:59 +00:00
#include <uv.h>
#include <pixman.h>
2019-09-07 18:42:55 +00:00
#define POPCOUNT(x) __builtin_popcount(x)
#define UDIV_UP(a, b) (((a) + (b) - 1) / (b))
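/* Adjust an RFB pixel format in place so that its byte order matches the
 * host. If it already matches, the format is left untouched; otherwise the
 * big-endian flag is flipped and the colour shifts are recomputed. */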
void pixel_format_into_native(struct rfb_pixel_format *fmt)
{
#if __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__
        if (!fmt->big_endian_flag)
                return;

        fmt->big_endian_flag = 0;
#else
        if (fmt->big_endian_flag)
                return;

        fmt->big_endian_flag = 1;
#endif

        fmt->red_shift = fmt->bits_per_pixel - fmt->red_shift;
        fmt->green_shift = fmt->bits_per_pixel - fmt->green_shift;
        fmt->blue_shift = fmt->bits_per_pixel - fmt->blue_shift;
}
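
/* Convert 'len' 32-bit pixels in 'src_fmt' into ZRLE compressed pixels
 * (CPIXELs) of 'bytes_per_cpixel' bytes each, written to 'dst' in the
 * client's 'dst_fmt'. Each colour component is rescaled from the source
 * bit depth to the destination bit depth, and the destination byte order
 * is honoured. */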
void pixel32_to_cpixel(uint8_t *restrict dst,
                       const struct rfb_pixel_format* dst_fmt,
                       const uint32_t *restrict src,
                       const struct rfb_pixel_format* src_fmt,
                       size_t bytes_per_cpixel, size_t len)
{
        assert(src_fmt->true_colour_flag);
        assert(src_fmt->bits_per_pixel == 32);
        assert(src_fmt->depth <= 32);
        assert(dst_fmt->true_colour_flag);
        assert(dst_fmt->bits_per_pixel <= 32);
        assert(dst_fmt->depth <= 24);
        assert(bytes_per_cpixel <= 3 && bytes_per_cpixel >= 1);

        uint32_t src_red_shift = src_fmt->red_shift;
        uint32_t src_green_shift = src_fmt->green_shift;
        uint32_t src_blue_shift = src_fmt->blue_shift;

        uint32_t dst_red_shift = dst_fmt->red_shift;
        uint32_t dst_green_shift = dst_fmt->green_shift;
        uint32_t dst_blue_shift = dst_fmt->blue_shift;

        uint32_t src_red_max = src_fmt->red_max;
        uint32_t src_green_max = src_fmt->green_max;
        uint32_t src_blue_max = src_fmt->blue_max;

        uint32_t dst_red_max = dst_fmt->red_max;
        uint32_t dst_green_max = dst_fmt->green_max;
        uint32_t dst_blue_max = dst_fmt->blue_max;

        uint32_t src_red_bits = POPCOUNT(src_fmt->red_max);
        uint32_t src_green_bits = POPCOUNT(src_fmt->green_max);
        uint32_t src_blue_bits = POPCOUNT(src_fmt->blue_max);

        uint32_t dst_red_bits = POPCOUNT(dst_fmt->red_max);
        uint32_t dst_green_bits = POPCOUNT(dst_fmt->green_max);
        uint32_t dst_blue_bits = POPCOUNT(dst_fmt->blue_max);

        uint32_t dst_endian_correction;

#define CONVERT_PIXELS(cpx, px) \
        { \
                uint32_t r, g, b; \
                r = ((px >> src_red_shift) & src_red_max) << dst_red_bits \
                        >> src_red_bits << dst_red_shift; \
                g = ((px >> src_green_shift) & src_green_max) << dst_green_bits \
                        >> src_green_bits << dst_green_shift; \
                b = ((px >> src_blue_shift) & src_blue_max) << dst_blue_bits \
                        >> src_blue_bits << dst_blue_shift; \
                cpx = r | g | b; \
        }
        switch (bytes_per_cpixel) {
        case 3:
                if (dst_fmt->bits_per_pixel == 32 && dst_fmt->depth <= 24) {
                        uint32_t min_dst_shift = dst_red_shift;
                        if (min_dst_shift > dst_green_shift)
                                min_dst_shift = dst_green_shift;
                        if (min_dst_shift > dst_blue_shift)
                                min_dst_shift = dst_blue_shift;

                        dst_red_shift -= min_dst_shift;
                        dst_green_shift -= min_dst_shift;
                        dst_blue_shift -= min_dst_shift;
                }

                dst_endian_correction = dst_fmt->big_endian_flag ? 16 : 0;

                while (len--) {
                        uint32_t cpx, px = *src++;

                        CONVERT_PIXELS(cpx, px)

                        *dst++ = (cpx >> (16 ^ dst_endian_correction)) & 0xff;
                        *dst++ = (cpx >> 8) & 0xff;
                        *dst++ = (cpx >> (0 ^ dst_endian_correction)) & 0xff;
                }
                break;
        case 2:
                dst_endian_correction = dst_fmt->big_endian_flag ? 8 : 0;

                while (len--) {
                        uint32_t cpx, px = *src++;

                        CONVERT_PIXELS(cpx, px)

                        *dst++ = (cpx >> (8 ^ dst_endian_correction)) & 0xff;
                        *dst++ = (cpx >> (0 ^ dst_endian_correction)) & 0xff;
                }
                break;
        case 1:
                while (len--) {
                        uint32_t cpx, px = *src++;

                        CONVERT_PIXELS(cpx, px)

                        *dst++ = cpx & 0xff;
                }
                break;
        default:
                abort();
        }

#undef CONVERT_PIXELS
}
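/* Return the index of 'colour' in 'palette', or -1 if it is not present. */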
static inline int find_colour_in_palette(uint32_t *palette, int len,
                                          uint32_t colour)
{
        for (int i = 0; i < len; ++i)
                if (palette[i] == colour)
                        return i;

        return -1;
}
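
/* Build a palette of up to 16 distinct colours from a tile. Returns the
 * number of colours found, or -1 if the tile contains more than 16. */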
int zrle_get_tile_palette(uint32_t *palette, const uint32_t *src, size_t length)
{
        int n = 0;

        /* TODO: Maybe ignore the alpha channel */
        palette[n++] = src[0];

        for (size_t i = 0; i < length; ++i) {
                uint32_t colour = src[i];

                if (find_colour_in_palette(palette, n, colour) < 0) {
                        if (n >= 16)
                                return -1;

                        palette[n++] = colour;
                }
        }

        return n;
}
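
/* Encode a single-colour tile: sub-encoding byte 1 followed by one CPIXEL. */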
void zrle_encode_unichrome_tile(struct vec *dst,
                                const struct rfb_pixel_format *dst_fmt,
                                uint32_t colour,
                                const struct rfb_pixel_format *src_fmt)
{
        int bytes_per_cpixel = dst_fmt->depth / 8;

        vec_fast_append_8(dst, 1);

        pixel32_to_cpixel(((uint8_t*)dst->data) + 1, dst_fmt, &colour, src_fmt,
                          bytes_per_cpixel, 1);

        dst->len += bytes_per_cpixel;
}
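
/* Append a palette index and its run length in ZRLE run-length form. A run
 * of one is just the index; longer runs set the top bit of the index and
 * write (run_length - 1) as a sequence of bytes that the decoder sums up,
 * ending with the first byte smaller than 255. For example, a run of 300
 * pixels of palette index 3 becomes the bytes 0x83 0xff 0x2c. */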
void encode_run_length(struct vec *dst, uint8_t index, int run_length)
{
        if (run_length == 1) {
                vec_fast_append_8(dst, index);
                return;
        }

        vec_fast_append_8(dst, index | 128);

        while (run_length > 255) {
                vec_fast_append_8(dst, 255);
                run_length -= 255;
        }

        vec_fast_append_8(dst, run_length - 1);
}
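
/* Encode a tile as palette RLE (sub-encoding 128 + palette size): the
 * palette is written as CPIXELs, followed by run-length encoded palette
 * indices covering the whole tile. */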
void zrle_encode_packed_tile(struct vec *dst,
                             const struct rfb_pixel_format *dst_fmt,
                             const uint32_t *src,
                             const struct rfb_pixel_format *src_fmt,
                             size_t length,
                             uint32_t *palette, int palette_size)
{
        int bytes_per_cpixel = dst_fmt->depth / 8;
        uint8_t cpalette[16 * 3];

        pixel32_to_cpixel((uint8_t*)cpalette, dst_fmt, palette,
                          src_fmt, bytes_per_cpixel, palette_size);

        vec_fast_append_8(dst, 128 | palette_size);

        vec_append(dst, cpalette, palette_size * bytes_per_cpixel);

        int index;
        int run_length = 1;

        for (size_t i = 1; i < length; ++i) {
                if (src[i] == src[i - 1]) {
                        run_length++;
                        continue;
                }

                index = find_colour_in_palette(palette, palette_size,
                                               src[i - 1]);
                encode_run_length(dst, index, run_length);
                run_length = 1;
        }

        if (run_length > 0) {
                index = find_colour_in_palette(palette, palette_size,
                                               src[length - 1]);
                encode_run_length(dst, index, run_length);
        }
}
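
/* Copy a tile (at most 64x64 pixels) out of the strided framebuffer into a
 * contiguous buffer, flipping it vertically if the NVNC_MOD_Y_INVERT
 * modifier is set. */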
void zrle_copy_tile(uint32_t *dst, const uint32_t *src, int stride,
                    int width, int height, enum nvnc_modifier mod)
{
        for (int y = 0; y < height; ++y) {
                if (!(mod & NVNC_MOD_Y_INVERT))
                        memcpy(dst + y * width, src + y * stride, width * 4);
                else
                        memcpy(dst + (height - y - 1) * width, src + y * stride,
                               width * 4);
        }
}
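/* Encode one tile into 'dst', choosing a sub-encoding based on how many
 * distinct colours it contains: solid colour (1 colour), palette RLE
 * (2 to 16 colours), or raw CPIXELs otherwise. */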
void zrle_encode_tile(struct vec *dst, const struct rfb_pixel_format *dst_fmt,
                      const uint32_t *src,
                      const struct rfb_pixel_format *src_fmt,
                      size_t length)
{
        int bytes_per_cpixel = dst_fmt->depth / 8;

        vec_clear(dst);

        uint32_t palette[16];
        int palette_size = zrle_get_tile_palette(palette, src, length);

        if (palette_size == 1) {
                zrle_encode_unichrome_tile(dst, dst_fmt, palette[0], src_fmt);
                return;
        }

        if (palette_size > 1) {
                zrle_encode_packed_tile(dst, dst_fmt, src, src_fmt, length,
                                        palette, palette_size);
                return;
        }

        vec_fast_append_8(dst, 0);

        pixel32_to_cpixel(((uint8_t*)dst->data) + 1, dst_fmt, src, src_fmt,
                          bytes_per_cpixel, length);

        dst->len += bytes_per_cpixel * length;
}
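/* Deflate the contents of 'src' into 'dst' using the per-client zlib
 * stream, growing 'dst' as needed. The stream is only flushed
 * (Z_SYNC_FLUSH) when 'flush' is set, i.e. after the last tile of a
 * rectangle. */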
int zrle_deflate(struct vec* dst, const struct vec* src, z_stream* zs,
                 bool flush)
{
        int r = Z_STREAM_ERROR;

        zs->next_in = src->data;
        zs->avail_in = src->len;

        do {
                if (dst->len == dst->cap && vec_reserve(dst, dst->cap * 2) < 0)
                        return -1;

                zs->next_out = ((Bytef*)dst->data) + dst->len;
                zs->avail_out = dst->cap - dst->len;

                r = deflate(zs, flush ? Z_SYNC_FLUSH : Z_NO_FLUSH);
                assert(r != Z_STREAM_ERROR);

                dst->len = zs->next_out - (Bytef*)dst->data;
        } while (zs->avail_out == 0);

        assert(zs->avail_in == 0);

        return 0;
}
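/* Encode one rectangle of the framebuffer as a ZRLE rectangle: the RFB
 * rectangle header, a placeholder for the compressed length, then each
 * 64x64 tile encoded and deflated in order. The length field is patched in
 * once the size of the compressed stream is known. */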
int zrle_encode_box(struct vec* out, const struct rfb_pixel_format *dst_fmt,
                    const struct nvnc_fb *fb,
                    const struct rfb_pixel_format *src_fmt,
                    int x, int y, int stride, int width, int height,
                    z_stream* zs)
{
        int r = -1;
        int bytes_per_cpixel = dst_fmt->depth / 8;

        struct vec in;
        if (vec_init(&in, 1 + bytes_per_cpixel * 64 * 64) < 0)
                return -1;

        uint32_t *tile = malloc(64 * 64 * 4);
        if (!tile)
                goto failure;
        struct rfb_server_fb_rect rect = {
                .encoding = htonl(RFB_ENCODING_ZRLE),
                .x = htons(x),
                .y = htons(y),
                .width = htons(width),
                .height = htons(height),
        };

        r = vec_append(out, &rect, sizeof(rect));
        if (r < 0)
                goto failure;

        /* Reserve space for size */
        size_t size_index = out->len;
        vec_append_zero(out, 4);

        int n_tiles = UDIV_UP(width, 64) * UDIV_UP(height, 64);

        for (int i = 0; i < n_tiles; ++i) {
                int tile_x = (i % UDIV_UP(width, 64)) * 64;
                int tile_y = (i / UDIV_UP(width, 64)) * 64;

                int tile_width = width - tile_x >= 64 ? 64 : width - tile_x;
                int tile_height = height - tile_y >= 64 ? 64 : height - tile_y;

                int y_off = !(fb->nvnc_modifier & NVNC_MOD_Y_INVERT)
                          ? y + tile_y
                          : fb->height - y - tile_y - tile_height;

                zrle_copy_tile(tile,
                               ((uint32_t*)fb->addr) + x + tile_x + y_off * stride,
                               stride, tile_width, tile_height,
                               fb->nvnc_modifier);

                zrle_encode_tile(&in, dst_fmt, tile, src_fmt,
                                 tile_width * tile_height);

                r = zrle_deflate(out, &in, zs, i == n_tiles - 1);
                if (r < 0)
                        goto failure;
        }

        uint32_t out_size = htonl(out->len - size_index - 4);
        memcpy(((uint8_t*)out->data) + size_index, &out_size, sizeof(out_size));
failure:
        vec_destroy(&in);
        free(tile);
        return r;
}
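
/*
 * Encode a framebuffer update message for the damaged region: a
 * FramebufferUpdate header followed by one ZRLE rectangle per box in the
 * region. If the region holds more boxes than the 16-bit rectangle count
 * allows, its bounding box is sent as a single rectangle instead.
 *
 * Rough usage sketch (illustrative only; the buffer size, compression
 * level and the client_fmt/server_fmt/fb names are assumptions, not part
 * of this file):
 *
 *      z_stream zs = { 0 };
 *      deflateInit(&zs, Z_DEFAULT_COMPRESSION);
 *
 *      struct vec out;
 *      vec_init(&out, 4096);
 *
 *      struct pixman_region16 damage;
 *      pixman_region_init_rect(&damage, 0, 0, fb->width, fb->height);
 *
 *      zrle_encode_frame(&zs, &out, &client_fmt, fb, &server_fmt, &damage);
 *      // out.data now holds a complete FramebufferUpdate message
 */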
int zrle_encode_frame(z_stream *zs,
                      struct vec* dst,
                      const struct rfb_pixel_format *dst_fmt,
                      const struct nvnc_fb *src,
                      const struct rfb_pixel_format *src_fmt,
                      struct pixman_region16 *region)
{
        int rc = -1;

        int n_rects = 0;
        struct pixman_box16 *box = pixman_region_rectangles(region, &n_rects);
        if (n_rects > UINT16_MAX) {
                box = pixman_region_extents(region);
                n_rects = 1;
        }

        struct rfb_server_fb_update_msg head = {
                .type = RFB_SERVER_TO_CLIENT_FRAMEBUFFER_UPDATE,
                .n_rects = htons(n_rects),
        };

        rc = vec_append(dst, &head, sizeof(head));
        if (rc < 0)
                return -1;

        for (int i = 0; i < n_rects; ++i) {
                int x = box[i].x1;
                int y = box[i].y1;
                int box_width = box[i].x2 - x;
                int box_height = box[i].y2 - y;

                rc = zrle_encode_box(dst, dst_fmt, src, src_fmt, x, y,
                                     src->width, box_width, box_height, zs);
                if (rc < 0)
                        return -1;
        }

        return 0;
}