/*
 * Copyright (c) 2020 - 2023 Andri Yngvason
 *
 * Permission to use, copy, modify, and/or distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES WITH
 * REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY
 * AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT,
 * INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
 * LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE
 * OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
 * PERFORMANCE OF THIS SOFTWARE.
 */

#include <stdlib.h>
#include <stdio.h>
#include <unistd.h>
#include <string.h>
#include <assert.h>
#include <errno.h>
#include <sys/uio.h>
#include <limits.h>
#include <aml.h>
#include <fcntl.h>
#include <poll.h>
#include <sys/socket.h>

#include "rcbuf.h"
#include "stream.h"
#include "stream-common.h"
#include "stream-tcp.h"
#include "sys/queue.h"
#include "crypto.h"
#include "neatvnc.h"

static_assert(sizeof(struct stream) <= STREAM_ALLOC_SIZE,
		"struct stream has grown too large, increase STREAM_ALLOC_SIZE");

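/* Encrypt a payload with the stream's installed cipher. The input rcbuf is
 * consumed and a new rcbuf holding the ciphertext is returned.
 */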
static struct rcbuf* encrypt_rcbuf(struct stream* self, struct rcbuf* payload)
{
	struct vec ciphertext = {};
	crypto_cipher_encrypt(self->cipher, &ciphertext, payload->payload,
			payload->size);
	struct rcbuf* result = rcbuf_new(ciphertext.data, ciphertext.len);
	rcbuf_unref(payload);
	return result;
}

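/* Close the stream: fail any queued send requests, stop the event handler
 * and close the file descriptor. Returns -1 if the stream is already closed.
 */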
int stream_tcp_close(struct stream* self)
{
	if (self->state == STREAM_STATE_CLOSED)
		return -1;

	self->state = STREAM_STATE_CLOSED;

	while (!TAILQ_EMPTY(&self->send_queue)) {
		struct stream_req* req = TAILQ_FIRST(&self->send_queue);
		TAILQ_REMOVE(&self->send_queue, req, link);
		stream_req__finish(req, STREAM_REQ_FAILED);
	}

	aml_stop(aml_get_default(), self->handler);
	close(self->fd);
	self->fd = -1;

	return 0;
}

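/* Release all resources held by the stream, including the stream itself. */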
void stream_tcp_destroy(struct stream* self)
{
	vec_destroy(&self->tmp_buf);
	crypto_cipher_del(self->cipher);
	stream_close(self);
	aml_unref(self->handler);
	free(self);
}

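/* Try to write the send queue to the socket using a single sendmsg() call.
 * Deferred (exec) requests are materialised, and encrypted if a cipher is
 * installed, just before transmission. Fully transmitted requests are
 * completed and removed from the queue; a partially transmitted payload is
 * shifted down so that the remainder goes out on the next writable event.
 */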
static int stream_tcp__flush(struct stream* self)
{
	if (self->cork)
		return 0;

	static struct iovec iov[IOV_MAX];
	size_t n_msgs = 0;
	ssize_t bytes_sent;

	struct stream_req* req;
	TAILQ_FOREACH(req, &self->send_queue, link) {
		if (req->exec) {
			if (req->payload)
				rcbuf_unref(req->payload);

			struct rcbuf* payload = req->exec(self, req->userdata);

			req->payload = self->cipher ?
				encrypt_rcbuf(self, payload) : payload;
		}

		iov[n_msgs].iov_base = req->payload->payload;
		iov[n_msgs].iov_len = req->payload->size;

		if (++n_msgs >= IOV_MAX)
			break;
	}

	if (n_msgs == 0)
		return 0;

	struct msghdr msghdr = {
		.msg_iov = iov,
		.msg_iovlen = n_msgs,
	};
	bytes_sent = sendmsg(self->fd, &msghdr, MSG_NOSIGNAL);
	if (bytes_sent < 0) {
		if (errno == EAGAIN || errno == EWOULDBLOCK) {
			stream__poll_rw(self);
			errno = EAGAIN;
			bytes_sent = 0;
		} else if (errno == EPIPE) {
			stream__remote_closed(self);
			errno = EPIPE;
		}

		return bytes_sent;
	}

	self->bytes_sent += bytes_sent;

	ssize_t bytes_left = bytes_sent;

	struct stream_req* tmp;
	TAILQ_FOREACH_SAFE(req, &self->send_queue, link, tmp) {
		bytes_left -= req->payload->size;

		if (bytes_left >= 0) {
			TAILQ_REMOVE(&self->send_queue, req, link);
			stream_req__finish(req, STREAM_REQ_DONE);
		} else {
			if (req->exec) {
				free(req->userdata);
				req->userdata = NULL;
				req->exec = NULL;
			}

			char* p = req->payload->payload;
			size_t s = req->payload->size;
			memmove(p, p + s + bytes_left, -bytes_left);
			req->payload->size = -bytes_left;
			stream__poll_rw(self);
		}

		if (bytes_left <= 0)
			break;
	}

	if (bytes_left == 0 && self->state != STREAM_STATE_CLOSED)
		stream__poll_r(self);

	assert(bytes_left <= 0);

	return bytes_sent;
}

static void stream_tcp__on_readable(struct stream* self)
{
	switch (self->state) {
	case STREAM_STATE_NORMAL:
		if (self->on_event)
			self->on_event(self, STREAM_EVENT_READ);
		break;
	case STREAM_STATE_CLOSED:
		break;
	default:;
	}
}

static void stream_tcp__on_writable(struct stream* self)
{
	switch (self->state) {
	case STREAM_STATE_NORMAL:
		stream_tcp__flush(self);
		break;
	case STREAM_STATE_CLOSED:
		break;
	default:;
	}
}

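/* Main aml event handler: dispatches readable and writable events to the
 * handlers above.
 */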
static void stream_tcp__on_event(void* obj)
{
	struct stream* self = aml_get_userdata(obj);
	uint32_t events = aml_get_revents(obj);

	if (events & AML_EVENT_READ)
		stream_tcp__on_readable(self);

	if (events & AML_EVENT_WRITE)
		stream_tcp__on_writable(self);
}

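/* Read from the socket into dst. When a cipher is installed, ciphertext is
 * first read into tmp_buf and then decrypted into dst; if message
 * authentication fails, the stream is closed and errno is set to EPROTO.
 */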
ssize_t stream_tcp_read(struct stream* self, void* dst, size_t size)
{
	if (self->state != STREAM_STATE_NORMAL)
		return -1;

	uint8_t* read_buffer = dst;

	if (self->cipher) {
		vec_reserve(&self->tmp_buf, size);
		read_buffer = self->tmp_buf.data;
	}

	ssize_t rc = read(self->fd, read_buffer, size);
	if (rc == 0)
		stream__remote_closed(self);
	if (rc > 0)
		self->bytes_received += rc;

	if (rc > 0 && self->cipher) {
		nvnc_trace("Got cipher text of length %zd", rc);
		ssize_t len = crypto_cipher_decrypt(self->cipher, dst, size,
				read_buffer, rc);
		if (len < 0) {
			nvnc_log(NVNC_LOG_ERROR, "Message authentication failed!");
			stream__remote_closed(self);
			errno = EPROTO;
			return -1;
		}
		rc = len;
	}

	return rc;
}

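/* Queue a payload for transmission, encrypting it first if a cipher is
 * installed, and try to flush the queue immediately.
 */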
int stream_tcp_send(struct stream* self, struct rcbuf* payload,
		stream_req_fn on_done, void* userdata)
{
	if (self->state == STREAM_STATE_CLOSED)
		return -1;

	struct stream_req* req = calloc(1, sizeof(*req));
	if (!req)
		return -1;

	req->payload = self->cipher ? encrypt_rcbuf(self, payload) : payload;
	req->on_done = on_done;
	req->userdata = userdata;

	TAILQ_INSERT_TAIL(&self->send_queue, req, link);

	return stream_tcp__flush(self);
}

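/* Insert a payload at the head of the send queue so that it is transmitted
 * before anything already queued.
 */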
int stream_tcp_send_first(struct stream* self, struct rcbuf* payload)
{
	if (self->state == STREAM_STATE_CLOSED)
		return -1;

	struct stream_req* req = calloc(1, sizeof(*req));
	if (!req)
		return -1;

	req->payload = payload;
	TAILQ_INSERT_HEAD(&self->send_queue, req, link);

	return stream_tcp__flush(self);
}

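/* Queue a deferred request: exec_fn is invoked from stream_tcp__flush() to
 * produce the payload right before it is sent.
 */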
void stream_tcp_exec_and_send(struct stream* self,
		stream_exec_fn exec_fn, void* userdata)
{
	if (self->state == STREAM_STATE_CLOSED)
		return;

	struct stream_req* req = calloc(1, sizeof(*req));
	if (!req)
		return;

	req->exec = exec_fn;
	req->userdata = userdata;

	TAILQ_INSERT_TAIL(&self->send_queue, req, link);

	stream_tcp__flush(self);
}

int stream_tcp_install_cipher(struct stream* self,
		struct crypto_cipher* cipher)
{
	assert(!self->cipher);
	self->cipher = cipher;
	return 0;
}

static struct stream_impl impl = {
	.close = stream_tcp_close,
	.destroy = stream_tcp_destroy,
	.read = stream_tcp_read,
	.send = stream_tcp_send,
	.send_first = stream_tcp_send_first,
	.exec_and_send = stream_tcp_exec_and_send,
	.install_cipher = stream_tcp_install_cipher,
};

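/* Initialise a TCP stream on an already connected socket: the fd is made
 * non-blocking, an aml handler is registered and the stream starts polling
 * for readable events.
 */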
int stream_tcp_init(struct stream* self, int fd, stream_event_fn on_event,
		void* userdata)
{
	self->impl = &impl;
	self->fd = fd;
	self->on_event = on_event;
	self->userdata = userdata;

	TAILQ_INIT(&self->send_queue);

	fcntl(fd, F_SETFL, fcntl(fd, F_GETFL, 0) | O_NONBLOCK);

	self->handler = aml_handler_new(fd, stream_tcp__on_event, self, NULL);
	if (!self->handler)
		return -1;

	if (aml_start(aml_get_default(), self->handler) < 0)
		goto start_failure;

	stream__poll_r(self);

	return 0;

start_failure:
	aml_unref(self->handler);
	return -1;
}

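/* Allocate and initialise a new TCP stream for the given socket. Returns
 * NULL on failure.
 */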
struct stream* stream_new(int fd, stream_event_fn on_event, void* userdata)
{
	struct stream* self = calloc(1, STREAM_ALLOC_SIZE);
	if (!self)
		return NULL;

	if (stream_tcp_init(self, fd, on_event, userdata) < 0) {
		free(self);
		return NULL;
	}

	return self;
}