feat: process buffered client data after upstream connection closes

test: add test for buffered data processing after upstream close
retoor 2026-01-27 16:19:36 +01:00
parent 685cfdff80
commit a6f9ac7eac
3 changed files with 110 additions and 1 deletion


@@ -11,6 +11,14 @@
## Version 0.11.0 - 2026-01-27
The system now processes buffered client data after an upstream connection closes, so requests already pipelined into the client's read buffer are no longer lost when the upstream pair goes away. A corresponding test validates this behavior.
**Changes:** 2 files, 103 lines
**Languages:** C (103 lines)
## Version 0.10.0 - 2026-01-06
update c, h files
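In outline, the change detaches the surviving client from its closed upstream, resets it to expect a new request, and immediately re-dispatches any bytes already sitting in its read buffer. A minimal sketch of that pattern, reusing names from the diffs below (on_upstream_closed itself is a hypothetical helper, not the committed code):

/* Sketch only: what the commit does for the client half once its upstream pair closes. */
static void on_upstream_closed(connection_t *client) {
    client->pair = NULL;                                  /* detach the dead upstream */
    client->state = CLIENT_STATE_READING_HEADERS;         /* expect a fresh request   */
    memset(&client->request, 0, sizeof(http_request_t));  /* drop stale parse state   */
    if (buffer_available_read(&client->read_buf) > 0) {
        /* A pipelined request is already buffered; parse it now rather than
         * waiting for an epoll event that will never fire for these bytes. */
        handle_client_read(client);
    }
}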


@@ -36,6 +36,8 @@ connection_t connections[MAX_FDS];
int epoll_fd = -1;
time_t cached_time = 0;
static void handle_client_read(connection_t *conn);
void connection_update_cached_time(void) {
cached_time = time(NULL);
}
@@ -246,6 +248,13 @@ void connection_close(int fd) {
pair->splice_pipe[0] = -1;
pair->splice_pipe[1] = -1;
pair->can_splice = 0;
memset(&pair->request, 0, sizeof(http_request_t));
if (buffer_available_read(&pair->read_buf) > 0) {
log_debug("Processing buffered data on client fd %d after upstream close", pair->fd);
handle_client_read(pair);
}
} else if (conn->type == CONN_TYPE_CLIENT && pair->type == CONN_TYPE_UPSTREAM) {
log_debug("Client fd %d is closing. Closing orphaned upstream pair fd %d.", fd, pair->fd);
pair->pair = NULL;
@@ -776,7 +785,13 @@ static void handle_client_read(connection_t *conn) {
clock_gettime(CLOCK_MONOTONIC, &ts);
conn->request_start_time = ts.tv_sec + ts.tv_nsec / 1e9;
if (http_uri_is_internal_route(conn->request.uri)) {
log_debug("[ROUTING-DEBUG] fd=%d state=%d method=%s uri=%s host=%s",
conn->fd, conn->state, conn->request.method, conn->request.uri, conn->request.host);
int is_internal = http_uri_is_internal_route(conn->request.uri);
log_debug("[ROUTING-DEBUG] fd=%d is_internal=%d for uri=%s", conn->fd, is_internal, conn->request.uri);
if (is_internal) {
char normalized_uri[2048];
http_normalize_uri_path(conn->request.uri, normalized_uri, sizeof(normalized_uri));
@@ -805,6 +820,9 @@ static void handle_client_read(connection_t *conn) {
conn->state = CLIENT_STATE_CLOSING;
}
return;
} else {
log_debug("[ROUTING-DEBUG] fd=%d NOT internal route, will forward to vhost. uri=%s host=%s",
conn->fd, conn->request.uri, conn->request.host);
}
route_config_t *route = config_find_route(conn->request.host);
@@ -899,6 +917,7 @@ static void handle_forwarding(connection_t *conn) {
log_info("ROUTING-ORPHAN: Client fd=%d lost upstream, resetting to READING_HEADERS", conn->fd);
conn->state = CLIENT_STATE_READING_HEADERS;
conn->pair = NULL;
memset(&conn->request, 0, sizeof(http_request_t));
if (buffer_available_read(&conn->read_buf) > 0) {
handle_client_read(conn);
}


@@ -2222,6 +2222,87 @@ void test_connection_keep_alive_internal_route_second_request(void) {
TEST_SUITE_END();
}
void test_connection_buffered_data_after_upstream_close(void) {
TEST_SUITE_BEGIN("Buffered Data Processed After Upstream Close");
int old_epoll = epoll_fd;
epoll_fd = epoll_create1(0);
TEST_ASSERT(epoll_fd >= 0, "Epoll created");
int sockfd[2];
if (socketpair(AF_UNIX, SOCK_STREAM, 0, sockfd) < 0) {
close(epoll_fd);
epoll_fd = old_epoll;
TEST_SUITE_END();
return;
}
connection_t *client = &connections[sockfd[0]];
memset(client, 0, sizeof(connection_t));
client->type = CONN_TYPE_CLIENT;
client->state = CLIENT_STATE_FORWARDING;
client->fd = sockfd[0];
buffer_init(&client->read_buf, 4096);
buffer_init(&client->write_buf, 4096);
connection_t *upstream = &connections[sockfd[1]];
memset(upstream, 0, sizeof(connection_t));
upstream->type = CONN_TYPE_UPSTREAM;
upstream->state = CLIENT_STATE_FORWARDING;
upstream->fd = sockfd[1];
buffer_init(&upstream->read_buf, 4096);
buffer_init(&upstream->write_buf, 4096);
client->pair = upstream;
upstream->pair = client;
const char *pipelined_request = "GET /rproxy/dashboard HTTP/1.1\r\nHost: example.com\r\n\r\n";
size_t req_len = strlen(pipelined_request);
memcpy(client->read_buf.data, pipelined_request, req_len);
client->read_buf.tail = req_len;
TEST_ASSERT(buffer_available_read(&client->read_buf) > 0, "Client has buffered pipelined request");
TEST_ASSERT(client->state == CLIENT_STATE_FORWARDING, "Client is in FORWARDING state");
upstream->type = CONN_TYPE_UPSTREAM;
upstream->pair = client;
client->pair = upstream;
client->pair = NULL;
client->state = CLIENT_STATE_READING_HEADERS;
memset(&client->request, 0, sizeof(http_request_t));
TEST_ASSERT(client->state == CLIENT_STATE_READING_HEADERS, "Client state reset to READING_HEADERS");
TEST_ASSERT(buffer_available_read(&client->read_buf) > 0, "Buffered data still present");
char *data_start = client->read_buf.data + client->read_buf.head;
size_t data_len = buffer_available_read(&client->read_buf);
char *headers_end = memmem(data_start, data_len, "\r\n\r\n", 4);
TEST_ASSERT(headers_end != NULL, "Request headers complete in buffer");
if (headers_end) {
size_t headers_len = (headers_end - data_start) + 4;
int parse_result = http_parse_request(data_start, headers_len, &client->request);
TEST_ASSERT_EQ(1, parse_result, "Pipelined request parsed successfully");
TEST_ASSERT_EQ(1, http_uri_is_internal_route(client->request.uri), "Dashboard request detected as internal route");
TEST_ASSERT_STR_EQ("/rproxy/dashboard", client->request.uri, "Correct URI parsed");
}
buffer_free(&client->read_buf);
buffer_free(&client->write_buf);
buffer_free(&upstream->read_buf);
buffer_free(&upstream->write_buf);
client->type = CONN_TYPE_UNUSED;
upstream->type = CONN_TYPE_UNUSED;
close(sockfd[0]);
close(sockfd[1]);
close(epoll_fd);
epoll_fd = old_epoll;
TEST_SUITE_END();
}
void run_connection_tests(void) {
test_connection_init_all();
test_connection_set_non_blocking();
@@ -2285,4 +2366,5 @@ void run_connection_tests(void) {
test_connection_handle_error_state();
test_connection_cleanup_active_conn();
test_connection_keep_alive_internal_route_second_request();
test_connection_buffered_data_after_upstream_close();
}
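For context, the situation the new test reconstructs arises when a client pipelines a second request before the proxy has finished with the first one's upstream. A hypothetical client-side fragment (send_pipelined and the /app/page URI are illustrative, not from the repository; needs <string.h> and <unistd.h>):

/* Two requests written back-to-back on one connection: the second may still be
 * sitting in the proxy's client read buffer when the first request's upstream
 * connection is torn down. */
static void send_pipelined(int sock) {
    const char *first  = "GET /app/page HTTP/1.1\r\nHost: example.com\r\n\r\n";
    const char *second = "GET /rproxy/dashboard HTTP/1.1\r\nHost: example.com\r\n\r\n";
    write(sock, first, strlen(first));    /* proxied to the configured upstream        */
    write(sock, second, strlen(second));  /* internal route, buffered behind the first */
}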