diff --git a/network/tcpquiclab/Makefile b/network/tcpquiclab/Makefile index f688051..7e5aaac 100644 --- a/network/tcpquiclab/Makefile +++ b/network/tcpquiclab/Makefile @@ -2,7 +2,7 @@ CC = gcc CFLAGS = -Wall -g LDFLAGS = -lquiche -ldl -lpthread -lm -all: tcp_server tcp_client quic_server quic_client tcp_perf_server tcp_perf_client quic_perf_server quic_perf_client +all: tcp_server tcp_client quic_server quic_client tcp_perf_server tcp_perf_client quic_perf_server quic_perf_client tcp_multi_server tcp_multi_client quic_multi_server quic_multi_client tcp_server: tcp_server.c $(CC) $(CFLAGS) -o tcp_server tcp_server.c @@ -28,5 +28,17 @@ quic_perf_server: quic_perf_server.c quic_perf_client: quic_perf_client.c $(CC) $(CFLAGS) -o quic_perf_client quic_perf_client.c $(LDFLAGS) +tcp_multi_server: tcp_multi_server.c + $(CC) $(CFLAGS) -o tcp_multi_server tcp_multi_server.c -lpthread + +tcp_multi_client: tcp_multi_client.c + $(CC) $(CFLAGS) -o tcp_multi_client tcp_multi_client.c -lpthread + +quic_multi_server: quic_multi_server.c + $(CC) $(CFLAGS) -o quic_multi_server quic_multi_server.c $(LDFLAGS) + +quic_multi_client: quic_multi_client.c + $(CC) $(CFLAGS) -o quic_multi_client quic_multi_client.c $(LDFLAGS) + clean: - rm -f tcp_server tcp_client quic_server quic_client tcp_perf_server tcp_perf_client quic_perf_server quic_perf_client + rm -f tcp_server tcp_client quic_server quic_client tcp_perf_server tcp_perf_client quic_perf_server quic_perf_client tcp_multi_server tcp_multi_client quic_multi_server quic_multi_client diff --git a/network/tcpquiclab/README_LINUX.md b/network/tcpquiclab/README_LINUX.md index 5219954..87b6623 100644 --- a/network/tcpquiclab/README_LINUX.md +++ b/network/tcpquiclab/README_LINUX.md @@ -103,7 +103,38 @@ We use Linux `tc` (Traffic Control) with `netem` instead of `clumsy`. 
+- Use `tc` to simulate packet loss (e.g., 5%). Observe how QUIC's multiplexing mitigates Head-of-Line (HoL) blocking: a single lost packet stalls the entire affected TCP connection, whereas in QUIC it stalls only the stream it belongs to and the other streams keep flowing.
+ +### 3.4 Network Recovery diff --git a/network/tcpquiclab/README_LINUX_CN.md b/network/tcpquiclab/README_LINUX_CN.md index 0dcd0af..85d595f 100644 --- a/network/tcpquiclab/README_LINUX_CN.md +++ b/network/tcpquiclab/README_LINUX_CN.md @@ -103,7 +103,38 @@ make sudo tc qdisc del dev lo root ``` -### 3.3 & 3.4 进阶测试 +### 3.3 进阶测试:多路复用与多连接对比 -- **多路复用:** 当前的 `quic_perf_client` 使用单个流 (Stream ID 4)。您可以修改代码并行启动多个流,以测试 QUIC 解决队头阻塞问题的能力。 -- **网络恢复:** 在长传输过程中,使用 `tc` 设置 100% 丢包 (`loss 100%`) 持续 30 秒,然后删除规则 (`del`),观察连接是否能恢复传输。 +本实验任务要求对比 TCP 多连接与 QUIC 多流复用的性能。 + +**场景 1: TCP 多连接并发** +同时建立 5 个 TCP 连接,每个连接传输 20MB 数据 (总计 100MB)。 + +1. 启动 TCP 多连接服务器: + ```bash + ./tcp_multi_server + ``` +2. 启动 TCP 多连接客户端: + ```bash + ./tcp_multi_client + ``` +3. 记录服务器输出的总时间与吞吐量。 + +**场景 2: QUIC 单连接多流复用** +建立 1 个 QUIC 连接,在其中同时开启 5 个流 (Stream),每个流传输 20MB 数据 (总计 100MB)。 + +1. 启动 QUIC 多流服务器: + ```bash + ./quic_multi_server + ``` +2. 启动 QUIC 多流客户端: + ```bash + ./quic_multi_client + ``` +3. 记录服务器输出的统计数据。 + +**分析重点:** +- 在正常网络下,两者的总耗时差异。 +- 使用 `tc` 模拟丢包 (如 5%) 后,对比两者性能下降的幅度。QUIC 的多流复用应能避免 TCP 的“队头阻塞”问题 (即一个包丢失不影响其他流的传输),从而在丢包环境下表现更优。 + +### 3.4 网络恢复测试 diff --git a/network/tcpquiclab/quic_multi_client b/network/tcpquiclab/quic_multi_client new file mode 100755 index 0000000..4e44fc9 Binary files /dev/null and b/network/tcpquiclab/quic_multi_client differ diff --git a/network/tcpquiclab/quic_multi_client.c b/network/tcpquiclab/quic_multi_client.c new file mode 100644 index 0000000..bfa84cd --- /dev/null +++ b/network/tcpquiclab/quic_multi_client.c @@ -0,0 +1,169 @@ +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#define MAX_DATAGRAM_SIZE 1350 +#define TOTAL_MB 100 +#define NUM_STREAMS 5 +#define MB_PER_STREAM (TOTAL_MB / NUM_STREAMS) + +typedef struct { + uint64_t stream_id; + long long bytes_sent; + long long bytes_total; + bool finished; +} StreamState; + +int main(int argc, char *argv[]) { + quiche_config *config = 
quiche_config_new(QUICHE_PROTOCOL_VERSION); + if (config == NULL) return -1; + + quiche_config_verify_peer(config, false); + quiche_config_set_application_protos(config, (uint8_t *) "\x0ahq-interop\x05hq-29\x05hq-28\x05hq-27\x08http/0.9", 38); + quiche_config_set_max_idle_timeout(config, 10000); + quiche_config_set_max_recv_udp_payload_size(config, MAX_DATAGRAM_SIZE); + quiche_config_set_max_send_udp_payload_size(config, MAX_DATAGRAM_SIZE); + quiche_config_set_initial_max_data(config, 1024 * 1024 * 500); + quiche_config_set_initial_max_stream_data_bidi_local(config, 1024 * 1024 * 100); + quiche_config_set_initial_max_stream_data_bidi_remote(config, 1024 * 1024 * 100); + quiche_config_set_initial_max_streams_bidi(config, 100); + quiche_config_set_cc_algorithm(config, QUICHE_CC_RENO); + + int sock = socket(AF_INET, SOCK_DGRAM, 0); + if (sock < 0) return -1; + + struct sockaddr_in peer_addr; + memset(&peer_addr, 0, sizeof(peer_addr)); + peer_addr.sin_family = AF_INET; + peer_addr.sin_port = htons(8889); + inet_pton(AF_INET, "100.115.45.1", &peer_addr.sin_addr); + + if (connect(sock, (struct sockaddr *)&peer_addr, sizeof(peer_addr)) < 0) return -1; + + struct sockaddr_in local_addr; + socklen_t local_addr_len = sizeof(local_addr); + if (getsockname(sock, (struct sockaddr *)&local_addr, &local_addr_len) < 0) return -1; + + int flags = fcntl(sock, F_GETFL, 0); + fcntl(sock, F_SETFL, flags | O_NONBLOCK); + + uint8_t scid[QUICHE_MAX_CONN_ID_LEN]; + int rng = open("/dev/urandom", O_RDONLY); + read(rng, scid, sizeof(scid)); + close(rng); + + quiche_conn *conn = quiche_connect("100.115.45.1", (const uint8_t *)scid, sizeof(scid), (struct sockaddr *)&local_addr, local_addr_len, (struct sockaddr *)&peer_addr, sizeof(peer_addr), config); + if (conn == NULL) return -1; + + printf("Connecting to QUIC Multi-Stream Server...\n"); + printf("Sending %d streams, %d MB each (Total %d MB)...\n", NUM_STREAMS, MB_PER_STREAM, TOTAL_MB); + + uint8_t buf[65535]; + uint8_t 
out[MAX_DATAGRAM_SIZE]; + uint8_t payload[4096]; + memset(payload, 'C', sizeof(payload)); + + // Initialize stream states + StreamState streams[NUM_STREAMS]; + for (int i = 0; i < NUM_STREAMS; i++) { + streams[i].stream_id = 4 * i + 4; // 4, 8, 12, 16, 20... (Client Bidi) or simple increment if library handles it? + // Note: Client initiated bidi streams usually start at 0, then 4, 8... + // but let's stick to explicit IDs or check quiche docs. + // Quiche expects us to use IDs. 0, 4, 8, 12, 16 are valid client bidi. + streams[i].stream_id = i * 4; + streams[i].bytes_sent = 0; + streams[i].bytes_total = (long long)MB_PER_STREAM * 1024 * 1024; + streams[i].finished = false; + } + + bool all_finished = false; + + while (1) { + ssize_t read_len = recv(sock, buf, sizeof(buf), MSG_DONTWAIT); + if (read_len > 0) { + quiche_conn_recv(conn, buf, read_len, &(quiche_recv_info){ + .to = (struct sockaddr *)&local_addr, + .to_len = local_addr_len, + .from = (struct sockaddr *)&peer_addr, + .from_len = sizeof(peer_addr), + }); + } + + if (quiche_conn_is_closed(conn)) { + printf("Connection closed.\n"); + break; + } + + if (quiche_conn_is_established(conn)) { + all_finished = true; + for (int i = 0; i < NUM_STREAMS; i++) { + if (!streams[i].finished) { + all_finished = false; + // Try to send on this stream + while (streams[i].bytes_sent < streams[i].bytes_total) { + uint64_t err_code = 0; + // Determine payload size + ssize_t sent = quiche_conn_stream_send(conn, streams[i].stream_id, payload, sizeof(payload), false, &err_code); + if (sent > 0) { + streams[i].bytes_sent += sent; + if (streams[i].bytes_sent >= streams[i].bytes_total) { + // Send FIN + quiche_conn_stream_send(conn, streams[i].stream_id, NULL, 0, true, &err_code); + streams[i].finished = true; + printf("Stream %ld finished.\n", streams[i].stream_id); + } + } else { + // E.g. Done (congestion) or Stream Limit + break; + } + } + } + } + + if (all_finished) { + // Wait a bit to ensure ACKs or just exit? 
+ // Ideally wait for close, but let's just loop a bit or wait for idle. + // Actually the server will likely not close connection, we can just idle. + } + } + + bool has_outgoing = false; + while (1) { + quiche_send_info send_info; + ssize_t written = quiche_conn_send(conn, out, sizeof(out), &send_info); + if (written == QUICHE_ERR_DONE) break; + if (written < 0) break; + send(sock, out, written, 0); + has_outgoing = true; + } + + quiche_conn_on_timeout(conn); + if (!has_outgoing && !all_finished) usleep(100); + if (all_finished && !has_outgoing) { + // Maybe wait for connection close or timeout + usleep(100000); // Wait 100ms + // quiche_conn_close(conn, true, 0, "Done", 4); + // break; + // Let's keep it alive for a moment for server to ack then exit + static int linger = 0; + if (linger++ > 20) { + printf("All streams finished. Closing.\n"); + uint8_t reason[] = "done"; + quiche_conn_close(conn, true, 0, reason, sizeof(reason)); + break; + } + } + } + + quiche_conn_free(conn); + quiche_config_free(config); + return 0; +} diff --git a/network/tcpquiclab/quic_multi_server b/network/tcpquiclab/quic_multi_server new file mode 100755 index 0000000..73b4663 Binary files /dev/null and b/network/tcpquiclab/quic_multi_server differ diff --git a/network/tcpquiclab/quic_multi_server.c b/network/tcpquiclab/quic_multi_server.c new file mode 100644 index 0000000..c25bdcd --- /dev/null +++ b/network/tcpquiclab/quic_multi_server.c @@ -0,0 +1,179 @@ +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#define MAX_DATAGRAM_SIZE 1350 +#define LOCAL_CONN_ID_LEN 16 +#define TARGET_MB 100 // Target total MB to receive + +typedef struct { + int sock; + struct sockaddr_storage peer_addr; + socklen_t peer_addr_len; + quiche_conn *conn; + long long total_bytes; + struct timespec start_time; + int timer_started; +} Client; + +int main(int argc, char *argv[]) { + quiche_config *config = quiche_config_new(QUICHE_PROTOCOL_VERSION); 
+ if (config == NULL) return -1; + + quiche_config_load_cert_chain_from_pem_file(config, "cert.crt"); + quiche_config_load_priv_key_from_pem_file(config, "cert.key"); + + quiche_config_set_application_protos(config, (uint8_t *) "\x0ahq-interop\x05hq-29\x05hq-28\x05hq-27\x08http/0.9", 38); + quiche_config_set_max_idle_timeout(config, 10000); + quiche_config_set_max_recv_udp_payload_size(config, MAX_DATAGRAM_SIZE); + quiche_config_set_max_send_udp_payload_size(config, MAX_DATAGRAM_SIZE); + quiche_config_set_initial_max_data(config, 1024 * 1024 * 500); + quiche_config_set_initial_max_stream_data_bidi_local(config, 1024 * 1024 * 100); + quiche_config_set_initial_max_stream_data_bidi_remote(config, 1024 * 1024 * 100); + quiche_config_set_initial_max_streams_bidi(config, 100); + quiche_config_set_cc_algorithm(config, QUICHE_CC_RENO); + + struct sockaddr_in sa; + memset(&sa, 0, sizeof(sa)); + sa.sin_family = AF_INET; + sa.sin_port = htons(8889); + sa.sin_addr.s_addr = INADDR_ANY; + + int sock = socket(AF_INET, SOCK_DGRAM, 0); + if (sock < 0) return -1; + if (bind(sock, (struct sockaddr *)&sa, sizeof(sa)) < 0) return -1; + + int flags = fcntl(sock, F_GETFL, 0); + fcntl(sock, F_SETFL, flags | O_NONBLOCK); + + printf("QUIC Multi-Stream Server listening on port 8889\n"); + printf("Expecting approx %d MB total data...\n", TARGET_MB); + + Client *client = NULL; + uint8_t buf[65535]; + uint8_t out[MAX_DATAGRAM_SIZE]; + bool done_printing = false; + + while (1) { + struct sockaddr_storage peer_addr; + socklen_t peer_addr_len = sizeof(peer_addr); + ssize_t read_len = recvfrom(sock, buf, sizeof(buf), 0, (struct sockaddr *)&peer_addr, &peer_addr_len); + + if (read_len > 0) { + uint8_t type; + uint32_t version; + uint8_t scid[QUICHE_MAX_CONN_ID_LEN]; + size_t scid_len = sizeof(scid); + uint8_t dcid[QUICHE_MAX_CONN_ID_LEN]; + size_t dcid_len = sizeof(dcid); + uint8_t token[256]; + size_t token_len = sizeof(token); + + int rc = quiche_header_info(buf, read_len, LOCAL_CONN_ID_LEN, 
&version, &type, scid, &scid_len, dcid, &dcid_len, token, &token_len); + + if (rc >= 0) { + if (client == NULL) { + if (!quiche_version_is_supported(version)) { + ssize_t written = quiche_negotiate_version(scid, scid_len, dcid, dcid_len, out, sizeof(out)); + if (written > 0) sendto(sock, out, written, 0, (struct sockaddr *)&peer_addr, peer_addr_len); + } else { + client = malloc(sizeof(Client)); + client->sock = sock; + client->peer_addr = peer_addr; + client->peer_addr_len = peer_addr_len; + + uint8_t server_scid[QUICHE_MAX_CONN_ID_LEN]; + int rng = open("/dev/urandom", O_RDONLY); + read(rng, server_scid, sizeof(server_scid)); + close(rng); + + client->conn = quiche_accept(server_scid, sizeof(server_scid), dcid, dcid_len, (struct sockaddr *)&sa, sizeof(sa), (struct sockaddr *)&peer_addr, peer_addr_len, config); + client->total_bytes = 0; + client->timer_started = 0; + printf("Connection accepted.\n"); + } + } + + if (client != NULL) { + quiche_conn_recv(client->conn, buf, read_len, &(quiche_recv_info){ + .to = (struct sockaddr *)&sa, + .to_len = sizeof(sa), + .from = (struct sockaddr *)&peer_addr, + .from_len = peer_addr_len, + }); + } + } + } + + if (client != NULL) { + quiche_conn *conn = client->conn; + + if (quiche_conn_is_closed(conn)) { + printf("Connection closed.\n"); + free(client); + client = NULL; + break; + } + + if (quiche_conn_is_established(conn)) { + uint64_t s = 0; + quiche_stream_iter *readable = quiche_conn_readable(conn); + while (quiche_stream_iter_next(readable, &s)) { + if (!client->timer_started) { + clock_gettime(CLOCK_MONOTONIC, &client->start_time); + client->timer_started = 1; + } + + uint8_t recv_buf[65535]; + bool fin = false; + uint64_t err_code = 0; + ssize_t recv_bytes = quiche_conn_stream_recv(conn, s, recv_buf, sizeof(recv_buf), &fin, &err_code); + if (recv_bytes > 0) { + client->total_bytes += recv_bytes; + } + } + quiche_stream_iter_free(readable); + + // Check if target reached (fuzzy check as logic overhead might mean exact 
bytes vary slightly or we just use >=) + if (client->total_bytes >= (long long)TARGET_MB * 1024 * 1024 && !done_printing) { + struct timespec end; + clock_gettime(CLOCK_MONOTONIC, &end); + double time_taken = (end.tv_sec - client->start_time.tv_sec) + (end.tv_nsec - client->start_time.tv_nsec) / 1e9; + double mb = client->total_bytes / (1024.0 * 1024.0); + double throughput = mb / time_taken; + + printf("\nTest Finished:\n"); + printf("Total Data Received: %.2f MB\n", mb); + printf("Time Taken: %.2f seconds\n", time_taken); + printf("Total Throughput: %.2f MB/s\n", throughput); + done_printing = true; + } + } + + bool has_outgoing = false; + while (1) { + quiche_send_info send_info; + ssize_t written = quiche_conn_send(conn, out, sizeof(out), &send_info); + if (written == QUICHE_ERR_DONE) break; + if (written < 0) break; + sendto(sock, out, written, 0, (struct sockaddr *)&send_info.to, send_info.to_len); + has_outgoing = true; + } + + quiche_conn_on_timeout(conn); + if (!has_outgoing && !done_printing) usleep(100); + if (done_printing && !has_outgoing) usleep(10000); // Slow down if done + } + } + + quiche_config_free(config); + return 0; +} diff --git a/network/tcpquiclab/tcp_multi_client b/network/tcpquiclab/tcp_multi_client new file mode 100755 index 0000000..6d6dd65 Binary files /dev/null and b/network/tcpquiclab/tcp_multi_client differ diff --git a/network/tcpquiclab/tcp_multi_client.c b/network/tcpquiclab/tcp_multi_client.c new file mode 100644 index 0000000..d09de10 --- /dev/null +++ b/network/tcpquiclab/tcp_multi_client.c @@ -0,0 +1,70 @@ +#include +#include +#include +#include +#include +#include +#include + +#define PORT 8081 +#define SERVER_IP "100.115.45.1" // Default IP, change if needed +#define BUFFER_SIZE 4096 +#define TOTAL_MB 100 +#define NUM_CONNS 5 +#define MB_PER_CONN (TOTAL_MB / NUM_CONNS) + +void *send_data(void *arg) { + int sock = 0; + struct sockaddr_in serv_addr; + char buffer[BUFFER_SIZE]; + memset(buffer, 'B', BUFFER_SIZE); + + if 
((sock = socket(AF_INET, SOCK_STREAM, 0)) < 0) { + printf("\n Socket creation error \n"); + return NULL; + } + + serv_addr.sin_family = AF_INET; + serv_addr.sin_port = htons(PORT); + + if (inet_pton(AF_INET, SERVER_IP, &serv_addr.sin_addr) <= 0) { + printf("\nInvalid address/ Address not supported \n"); + return NULL; + } + + if (connect(sock, (struct sockaddr *)&serv_addr, sizeof(serv_addr)) < 0) { + printf("\nConnection Failed \n"); + return NULL; + } + + long long bytes_to_send = (long long)MB_PER_CONN * 1024 * 1024; + long long bytes_sent = 0; + + while (bytes_sent < bytes_to_send) { + int to_send = (bytes_to_send - bytes_sent > BUFFER_SIZE) ? BUFFER_SIZE : (bytes_to_send - bytes_sent); + send(sock, buffer, to_send, 0); + bytes_sent += to_send; + } + + close(sock); + return NULL; +} + +int main(int argc, char const *argv[]) { + pthread_t threads[NUM_CONNS]; + printf("Starting %d TCP connections, sending %d MB each (Total %d MB).\n", NUM_CONNS, MB_PER_CONN, TOTAL_MB); + + for (int i = 0; i < NUM_CONNS; i++) { + if (pthread_create(&threads[i], NULL, send_data, NULL) != 0) { + perror("Thread create failed"); + return 1; + } + } + + for (int i = 0; i < NUM_CONNS; i++) { + pthread_join(threads[i], NULL); + } + + printf("All connections finished sending.\n"); + return 0; +} diff --git a/network/tcpquiclab/tcp_multi_server b/network/tcpquiclab/tcp_multi_server new file mode 100755 index 0000000..c3d8c62 Binary files /dev/null and b/network/tcpquiclab/tcp_multi_server differ diff --git a/network/tcpquiclab/tcp_multi_server.c b/network/tcpquiclab/tcp_multi_server.c new file mode 100644 index 0000000..209b8f0 --- /dev/null +++ b/network/tcpquiclab/tcp_multi_server.c @@ -0,0 +1,118 @@ +#include +#include +#include +#include +#include +#include +#include +#include + +#define PORT 8081 +#define BUFFER_SIZE 4096 +#define EXPECTED_CONNECTIONS 5 +#define TOTAL_TARGET_MB 100 + +long long global_bytes_received = 0; +int connections_handled = 0; +pthread_mutex_t lock = 
PTHREAD_MUTEX_INITIALIZER; +struct timespec start_time, end_time; +int first_connect = 1; + +void *handle_client(void *socket_desc) { + int sock = *(int*)socket_desc; + free(socket_desc); + char buffer[BUFFER_SIZE]; + int valread; + long long thread_bytes = 0; + + while ((valread = read(sock, buffer, BUFFER_SIZE)) > 0) { + thread_bytes += valread; + } + + pthread_mutex_lock(&lock); + global_bytes_received += thread_bytes; + connections_handled++; + pthread_mutex_unlock(&lock); + + close(sock); + return NULL; +} + +int main() { + int server_fd, new_socket; + struct sockaddr_in address; + int opt = 1; + int addrlen = sizeof(address); + + if ((server_fd = socket(AF_INET, SOCK_STREAM, 0)) == 0) { + perror("socket failed"); + exit(EXIT_FAILURE); + } + + if (setsockopt(server_fd, SOL_SOCKET, SO_REUSEADDR | SO_REUSEPORT, &opt, sizeof(opt))) { + perror("setsockopt"); + exit(EXIT_FAILURE); + } + + address.sin_family = AF_INET; + address.sin_addr.s_addr = INADDR_ANY; + address.sin_port = htons(PORT); + + if (bind(server_fd, (struct sockaddr *)&address, sizeof(address)) < 0) { + perror("bind failed"); + exit(EXIT_FAILURE); + } + + if (listen(server_fd, 5) < 0) { + perror("listen"); + exit(EXIT_FAILURE); + } + + printf("TCP Multi-Connection Server listening on port %d...\n", PORT); + printf("Waiting for %d connections to transfer total %d MB...\n", EXPECTED_CONNECTIONS, TOTAL_TARGET_MB); + + pthread_t threads[EXPECTED_CONNECTIONS]; + int t_count = 0; + + while (t_count < EXPECTED_CONNECTIONS) { + if ((new_socket = accept(server_fd, (struct sockaddr *)&address, (socklen_t*)&addrlen)) < 0) { + perror("accept"); + exit(EXIT_FAILURE); + } + + if (first_connect) { + clock_gettime(CLOCK_MONOTONIC, &start_time); + first_connect = 0; + printf("First connection received. 
Timer started.\n"); + } + + /* heap-allocate the fd so each handler thread owns its own copy; was malloc(1), an undersized allocation for an int (heap overflow) */ + int *new_sock = malloc(sizeof(int)); + *new_sock = new_socket; + + if (pthread_create(&threads[t_count], NULL, handle_client, (void*)new_sock) < 0) { + perror("could not create thread"); + return 1; + } + t_count++; + } + + // Wait for all threads to finish + for (int i = 0; i < EXPECTED_CONNECTIONS; i++) { + pthread_join(threads[i], NULL); + } + + clock_gettime(CLOCK_MONOTONIC, &end_time); + + double time_taken = (end_time.tv_sec - start_time.tv_sec) + (end_time.tv_nsec - start_time.tv_nsec) / 1e9; + double mb = global_bytes_received / (1024.0 * 1024.0); + double throughput = mb / time_taken; + + printf("\nTest Finished:\n"); + printf("Total Connections: %d\n", connections_handled); + printf("Total Data Received: %.2f MB\n", mb); + printf("Time Taken: %.2f seconds\n", time_taken); + printf("Total Throughput: %.2f MB/s\n", throughput); + + close(server_fd); + return 0; +}