Compare commits

...

17 Commits

Author SHA1 Message Date
bf682e0756 Add build derivation 2025-10-10 20:18:37 -05:00
126eba308b fix reliance on C++20 since apparently that doesn't work on Ubuntu 2025-09-26 20:59:44 -05:00
80690945fe switch to Boost for network connectivity 2025-09-26 20:03:28 -05:00
5f57f4631b mention Nix in readme 2025-09-05 20:03:22 -05:00
2f991b1eaa add Nix flake 2025-09-05 19:59:59 -05:00
4372684d00 fix compile issues 2025-09-05 19:59:51 -05:00
e3af31f1e0 add boost libs 2025-04-25 20:12:28 -05:00
ed8c594a30 setup more proper logging and get stuff working maybe 2025-03-26 20:57:23 -05:00
ea952e1981 rewrite packets to send in jpeg form 2025-03-26 19:13:13 -05:00
c85581749b first attempt at networking (does not work) 2025-03-26 15:28:16 -05:00
a134513b9c start adding security vulnerabilities (packet class) 2025-03-25 17:08:58 -05:00
7c780e0017 add readme 2025-03-25 16:28:58 -05:00
54246257c9 setup Meson project, get the server to read a video feed from a local camera 2025-03-25 16:26:35 -05:00
d4aa4eabb4 move the original python code to legacy subdirectory 2025-03-25 15:51:16 -05:00
08a09e3b15 switch to close enough matching 2025-02-01 15:40:01 -06:00
00a5bceffb only send differing packets 2025-02-01 13:38:59 -06:00
8ba524087d use asyncudp to (hopefully) accelerate networking, also add a tiled image packet for testing 2025-02-01 13:24:42 -06:00
14 changed files with 768 additions and 109 deletions

1
.gitignore vendored
View File

@@ -1,2 +1,3 @@
.venv .venv
.idea .idea
build

View File

@@ -3,56 +3,44 @@
This project is a demo for streaming video output from multiple "client" devices to one "server". This is a basic demo This project is a demo for streaming video output from multiple "client" devices to one "server". This is a basic demo
of what sauron-cv seeks to accomplish. of what sauron-cv seeks to accomplish.
## Installation ## Building
It is strongly recommended that you use a virtual environment. These instructions will assume you are using venv, you This project uses [Meson](https://mesonbuild.com/) for build management. This project also optionally uses
can substitute this with your preferred environment management. You will need to make sure that the `virtualenv` package [Nix](https://nix.dev/) for system dependency management.
is installed globally, either via pip or the `python3-virtualenv` package in your system package manager.
When first cloning this repo, run the following: ### Using Nix
```shell ```shell
python -m venv .venv nix develop
source .venv/bin/activate
pip install -r requirements.txt
``` ```
This will create a virtual environment, enter that virtual environment, and install the required packages. ### Install Dependencies
If you start a new shell, you will need to re-enter the virtual environment: Install the following packages from your distribution package manager:
- `meson`
- `opencv`
- `boost`
A better procedure for this (hopefully involving Meson) will be added / documented at a later date.
### Setup Meson
```shell ```shell
source .venv/bin/activate meson setup build
```
*NOTE FOR JETBRAINS / CLION USERS: PLEASE SET YOUR MESON BUILD DIRECTORY TO `build` IN THE IDE SETTINGS UNDER "Build /
Execution / Deployment" -> "Meson"*
### Compiling
```shell
meson build client # for client only
meson build server # for server only
``` ```
## Running ## Running
### Client
To run the client with a localhost target:
```shell ```shell
python -m client ./build/client # for client application
./build/server # for server application
``` ```
To target an external server, provide its IP address using the `-s` flag:
```shell
python -m client -s [server IP address]
```
### Server
```shell
python -m server
```
### Common Flags
Make sure that these match between your client and server!!
| Short | Long | Description | Default |
|-------|---|---|---|
| `-p` | `--port` | The port for the client / server to communicate on. | `5005` |
| `-W` | `--width` | Image width in pixels. | `640` |
| `-H` | `--height` | Image height in pixels. | `480` |

60
client.cpp Normal file
View File

@@ -0,0 +1,60 @@
#include <cstring>
#include <iostream>
#include <chrono>
#include <thread>
#include <netinet/in.h>
#include <sys/socket.h>
#include <unistd.h>
#include <boost/asio.hpp>
#include <opencv2/videoio.hpp>
#include "transfer.h"
#include "logging.h"
using namespace std;
using boost::asio::ip::tcp;
int main() {
const int FRAME_DELAY_MS = 1000 / 30;
// create video capture
cv::VideoCapture cap = cv::VideoCapture(0);
try {
boost::asio::io_context io_context;
tcp::resolver resolver(io_context);
tcp::resolver::results_type endpoints =
resolver.resolve("127.0.0.1", "8080");
cv::Mat image = cv::Mat::zeros(cv::Size(640, 480), CV_8UC3);
info("Ready to connect.");
// sending connection request
tcp::socket socket(io_context);
boost::asio::connect(socket, endpoints);
info("Connected.");
// create buffer for serialization
vector<uchar> imgbuf;
while (true) {
auto start_time = std::chrono::high_resolution_clock::now();
cap.read(image);
trace("Sending image");
sendImage(socket, image, imgbuf);
auto end_time = std::chrono::high_resolution_clock::now();
auto elapsed_time = std::chrono::duration_cast<std::chrono::milliseconds>(end_time - start_time);
// Calculate the remaining time to sleep
auto sleep_duration = std::chrono::milliseconds(FRAME_DELAY_MS) - elapsed_time;
// Sleep for the remaining duration if positive
if (sleep_duration.count() > 0) {
std::this_thread::sleep_for(sleep_duration);
}
}
}
catch (std::exception& e) {
error(e.what());
}
return 0;
}

61
flake.lock generated Normal file
View File

@@ -0,0 +1,61 @@
{
"nodes": {
"flake-utils": {
"inputs": {
"systems": "systems"
},
"locked": {
"lastModified": 1731533236,
"narHash": "sha256-l0KFg5HjrsfsO/JpG+r7fRrqm12kzFHyUHqHCVpMMbI=",
"owner": "numtide",
"repo": "flake-utils",
"rev": "11707dc2f618dd54ca8739b309ec4fc024de578b",
"type": "github"
},
"original": {
"owner": "numtide",
"repo": "flake-utils",
"type": "github"
}
},
"nixpkgs": {
"locked": {
"lastModified": 1756386758,
"narHash": "sha256-1wxxznpW2CKvI9VdniaUnTT2Os6rdRJcRUf65ZK9OtE=",
"owner": "NixOS",
"repo": "nixpkgs",
"rev": "dfb2f12e899db4876308eba6d93455ab7da304cd",
"type": "github"
},
"original": {
"owner": "NixOS",
"ref": "nixos-unstable",
"repo": "nixpkgs",
"type": "github"
}
},
"root": {
"inputs": {
"flake-utils": "flake-utils",
"nixpkgs": "nixpkgs"
}
},
"systems": {
"locked": {
"lastModified": 1681028828,
"narHash": "sha256-Vy1rq5AaRuLzOxct8nz4T6wlgyUR7zLU309k9mBC768=",
"owner": "nix-systems",
"repo": "default",
"rev": "da67096a3b9bf56a91d16901293e51ba5b49a27e",
"type": "github"
},
"original": {
"owner": "nix-systems",
"repo": "default",
"type": "github"
}
}
},
"root": "root",
"version": 7
}

63
flake.nix Normal file
View File

@@ -0,0 +1,63 @@
{
  description = "video-streaming-poc devShell";

  inputs = {
    nixpkgs.url = "github:NixOS/nixpkgs/nixos-unstable";
    flake-utils.url = "github:numtide/flake-utils";
  };

  outputs = { self, nixpkgs, flake-utils, ... }:
    flake-utils.lib.eachDefaultSystem (system:
      let
        pkgs = import nixpkgs { inherit system; };
      in with pkgs;
      let
        # OpenCV built with a GTK backend so cv::imshow works in the dev shell
        opencv-custom = pkgs.opencv.override {
          enableGtk3 = true;
          #enableCuda = true;
          enablePython = true;
        };
      in {
        devShells.default = mkShell rec {
          buildInputs = [
            # Meson
            meson
            pkg-config
            ninja
            # Boost
            boost
            # OpenCV
            opencv-custom
          ];
        };
        packages.default = pkgs.stdenv.mkDerivation {
          name = "video-streaming-poc";
          src = ./.;
          nativeBuildInputs = with pkgs; [
            meson
            ninja
            pkg-config
          ];
          buildInputs = with pkgs; [
            boost
            opencv-custom
          ];
          # NOTE: meson compile must be pointed at the build directory with -C;
          # the phase runs from the source root, so a bare `meson compile`
          # (as previously written) aborts with "not a meson build directory".
          buildPhase = ''
            meson setup --wipe build
            meson compile -C build
          '';
          installPhase = ''
            mkdir -p $out/bin
            cp build/client $out/bin/
            cp build/server $out/bin/
          '';
        };
      }
    );
}

58
legacy/README.md Normal file
View File

@@ -0,0 +1,58 @@
# Video Streaming Proof of Concept
This project is a demo for streaming video output from multiple "client" devices to one "server". This is a basic demo
of what sauron-cv seeks to accomplish.
## Installation
It is strongly recommended that you use a virtual environment. These instructions assume you are using venv; you can
substitute your preferred environment manager. You will need to make sure that the `virtualenv` package is installed
globally, either via pip or the `python3-virtualenv` package in your system package manager.
When first cloning this repo, run the following:
```shell
python -m venv .venv
source .venv/bin/activate
pip install -r requirements.txt
```
This will create a virtual environment, enter that virtual environment, and install the required packages.
If you start a new shell, you will need to re-enter the virtual environment:
```shell
source .venv/bin/activate
```
## Running
### Client
To run the client with a localhost target:
```shell
python -m client
```
To target an external server, provide its IP address using the `-s` flag:
```shell
python -m client -s [server IP address]
```
### Server
```shell
python -m server
```
### Common Flags
Make sure that these match between your client and server!!
| Short | Long | Description | Default |
|-------|---|---|---|
| `-p` | `--port` | The port for the client / server to communicate on. | `5005` |
| `-W` | `--width` | Image width in pixels. | `640` |
| `-H` | `--height` | Image height in pixels. | `480` |

View File

@@ -5,37 +5,49 @@ import socket
import numpy as np import numpy as np
import uuid import uuid
from common import StdPacket, InterlacedPacket, DoublyInterlacedPacket from common import StdPacket, InterlacedPacket, DoublyInterlacedPacket, TiledImagePacket
def send_packet(sock, packet): def send_packet(sock, packet):
sock.sendto(packet, (UDP_IP, UDP_PORT)) sock.sendto(packet, (UDP_IP, UDP_PORT))
def breakdown_image_norm(frame): def breakdown_image_norm(frame, last_frame):
(cols, rows, colors) = frame.shape (cols, rows, colors) = frame.shape
# break the array down into 16x16 chunks, then transmit them as UDP packets # break the array down into 16x16 chunks, then transmit them as UDP packets
for i in range(0, cols, 16): for i in range(0, cols, 16):
for j in range(0, rows, 16): for j in range(0, rows, 16):
# print("Sending frame segment (%d, %d)", i, j) # print("Sending frame segment (%d, %d)", i, j)
pkt = StdPacket(uuid, j, i, frame[i:i + 16, j:j + 16]) arr = frame[i:i + 16, j:j + 16]
send_packet(sock, pkt.to_bytestr()) last_arr = last_frame[i:i + 16, j:j + 16]
# only update if image segments are different
if not np.allclose(arr, last_arr):
pkt = StdPacket(uuid, j, i, arr)
send_packet(sock, pkt.to_bytestr())
def breakdown_image_interlaced(frame): def breakdown_image_interlaced(frame, last_frame):
(cols, rows, colors) = frame.shape (cols, rows, colors) = frame.shape
# break the array into 16x32 chunks. we'll split those further into odd and even rows # break the array into 16x32 chunks. we'll split those further into odd and even rows
# and send each as UDP packets. this should make packet loss less obvious # and send each as UDP packets. this should make packet loss less obvious
for i in range(0, cols, 32): for i in range(0, cols, 32):
for j in range(0, rows, 16): for j in range(0, rows, 16):
# print("Sending frame segment (%d, %d)", i, j) # print("Sending frame segment (%d, %d)", i, j)
pkt = InterlacedPacket(uuid, j, i, False, frame[i:i + 32:2, j:j + 16]) arr = frame[i:i + 32:2, j:j + 16]
send_packet(sock, pkt.to_bytestr()) last_arr = last_frame[i:i + 32:2, j:j + 16]
if not np.allclose(arr, last_arr):
pkt = InterlacedPacket(uuid, j, i, False, arr)
send_packet(sock, pkt.to_bytestr())
for i in range(0, cols, 32): for i in range(0, cols, 32):
for j in range(0, rows, 16): for j in range(0, rows, 16):
# print("Sending frame segment (%d, %d)", i, j) # print("Sending frame segment (%d, %d)", i, j)
pkt = InterlacedPacket(uuid, j, i, True, frame[i + 1:i + 32:2, j:j + 16]) arr = frame[i + 1:i + 32:2, j:j + 16]
send_packet(sock, pkt.to_bytestr()) last_arr = last_frame[i + 1:i + 32:2, j:j + 16]
# only update if image segments are different
if not np.allclose(arr, last_arr):
pkt = InterlacedPacket(uuid, j, i, True, arr)
send_packet(sock, pkt.to_bytestr())
def breakdown_image_dint(frame): def breakdown_image_dint(frame, last_frame):
(cols, rows, colors) = frame.shape (cols, rows, colors) = frame.shape
# break the array into 16x32 chunks. we'll split those further into odd and even rows # break the array into 16x32 chunks. we'll split those further into odd and even rows
# and send each as UDP packets. this should make packet loss less obvious # and send each as UDP packets. this should make packet loss less obvious
@@ -45,8 +57,26 @@ def breakdown_image_dint(frame):
# print("Sending frame segment (%d, %d)", i, j) # print("Sending frame segment (%d, %d)", i, j)
i_even = l % 2 == 0 i_even = l % 2 == 0
j_even = l >= 2 j_even = l >= 2
pkt = DoublyInterlacedPacket(uuid, j, i, j_even, i_even, frame[i + i_even:i + 32:2, j + j_even:j + 32:2])
send_packet(sock, pkt.to_bytestr()) # breakdown image
arr = frame[i + i_even:i + 32:2, j + j_even:j + 32:2]
last_arr = last_frame[i + i_even:i + 32:2, j + j_even:j + 32:2]
# only update if image segments are different
if not np.allclose(arr, last_arr):
pkt = DoublyInterlacedPacket(uuid, j, i, j_even, i_even, arr)
send_packet(sock, pkt.to_bytestr())
def breakdown_image_tiled(frame):
(cols, rows, colors) = frame.shape
# break the array into 16x32 chunks. we'll split those further into odd and even rows
# and send each as UDP packets. this should make packet loss less obvious
xslice = cols // 16
yslice = rows // 16
for i in range(0, xslice):
for j in range(0, yslice):
# print("Sending frame segment (%d, %d)", i, j)
pkt = TiledImagePacket(uuid, j, i, rows, cols, frame[i:cols:xslice, j:rows:yslice])
send_packet(sock, pkt.to_bytestr())
if __name__ == '__main__': if __name__ == '__main__':
# argument parser # argument parser
@@ -80,7 +110,11 @@ if __name__ == '__main__':
# create the socket # create the socket
sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM) sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
frame = np.zeros((HEIGHT, WIDTH, 3), dtype=np.uint8)
last_frame = np.zeros((HEIGHT, WIDTH, 3), dtype=np.uint8)
while True: while True:
last_frame = frame.copy()
# Capture frame-by-frame # Capture frame-by-frame
ret, frame = cap.read() ret, frame = cap.read()
@@ -89,7 +123,7 @@ if __name__ == '__main__':
print("Can't receive frame (stream end?). Exiting ...") print("Can't receive frame (stream end?). Exiting ...")
break break
breakdown_image_dint(frame) breakdown_image_dint(frame, last_frame)
# Release the capture and close all windows # Release the capture and close all windows
cap.release() cap.release()

View File

@@ -134,3 +134,38 @@ def from_bytes_dint(b: bytes) -> DoublyInterlacedPacket:
array = np.frombuffer(b[28:], np.uint8).reshape(16, 16, 3) array = np.frombuffer(b[28:], np.uint8).reshape(16, 16, 3)
return DoublyInterlacedPacket(uuid, x, y, even_x, even_y, array) return DoublyInterlacedPacket(uuid, x, y, even_x, even_y, array)
class TiledImagePacket(Packet):
"""Distributed selection from image."""
size = 16 + 4 + 4 + 768
def __init__(self, uuid: UUID, x: int, y: int, width: int, height: int, array: np.ndarray):
super().__init__(uuid, x, y, array)
self.width = width
self.height = height
self.xslice = width // 16
self.yslice = height // 16
def to_bytestr(self) -> bytes:
bytestr = b""
bytestr += self.uuid.bytes
bytestr += self.x.to_bytes(length=4, signed=False)
bytestr += self.y.to_bytes(length=4, signed=False)
bytestr += self.array.tobytes()
return bytestr
def apply(self, image: np.ndarray) -> np.ndarray:
x = self.x
y = self.y
arr = self.array
image[y:self.height:self.yslice, x:self.width:self.xslice] = arr
return image
def from_bytes_tiled(b: bytes) -> TiledImagePacket:
"""Convert a byte string obtained via UDP into a packet object."""
uuid = UUID(bytes = b[0:16])
x = int.from_bytes(b[16:20], signed = False)
y = int.from_bytes(b[20:24], signed = False)
array = np.frombuffer(b[24:], np.uint8).reshape(16, 16, 3)
return TiledImagePacket(uuid, x, y, 640, 480, array)

View File

@@ -1,3 +1,4 @@
opencv-python opencv-python
numpy numpy
rich rich
asyncudp

View File

@@ -5,6 +5,7 @@ import socket
import numpy as np import numpy as np
from datetime import datetime from datetime import datetime
import asyncio import asyncio
import asyncudp
import argparse import argparse
from rich.console import Console from rich.console import Console
@@ -34,33 +35,6 @@ class Client:
# Dictionary of client states stored by UUID # Dictionary of client states stored by UUID
frames: Dict[str, Client] = {} frames: Dict[str, Client] = {}
async def read_packet():
"""Asynchronous coroutine to read UDP packets from the client(s)."""
while True:
# we repeat this a ton of times all at once to hopefully capture all of the image data
for i in range(0, 1600):
try:
data, addr = sock.recvfrom(DoublyInterlacedPacket.size) # packet buffer size based on the packet size
# print("received packet from", addr)
if data:
pkt = from_bytes_dint(data)
uuid = str(pkt.uuid)
# if this is a new client, give it a new image
if uuid not in frames.keys():
console.log(f"New client acquired, naming [bold cyan]{uuid}[bold cyan]")
frames[uuid] = Client()
stat.update(f"[bold yellow]{len(frames.keys())}[/bold yellow] clients connected.")
frames[uuid].update(pkt)
except BlockingIOError:
pass
# this is necessary to allow asyncio to swap between reading packets and rendering frames
await asyncio.sleep(0.001)
async def show_frames(): async def show_frames():
"""Asynchronous coroutine to display frames in OpenCV debug windows.""" """Asynchronous coroutine to display frames in OpenCV debug windows."""
while True: while True:
@@ -70,20 +44,43 @@ async def show_frames():
console.log(f"Client likely lost connection, dropping [bold red]{id}[/bold red]") console.log(f"Client likely lost connection, dropping [bold red]{id}[/bold red]")
cv2.destroyWindow(id) cv2.destroyWindow(id)
frames.pop(id) frames.pop(id)
stat.update(f"[bold yellow]{len(frames.keys())}[/bold yellow] clients connected.")
else: else:
# show the latest available frame # show the latest available frame
cv2.imshow(id, frames[id].read()) cv2.imshow(id, frames[id].read())
cv2.waitKey(1) cv2.waitKey(1)
# this is necessary to allow asyncio to swap between reading packets and rendering frames # this is necessary to allow asyncio to swap between reading packets and rendering frames
await asyncio.sleep(0.01) await asyncio.sleep(0.05)
async def listen(ip: str, port: int):
"""Asynchronous coroutine to listen for / read client connections."""
sock = await asyncudp.create_socket(local_addr=(ip, port))
console.log("Ready to accept connections.", style="bold green")
while True:
# receive packets
data, addr = await sock.recvfrom()
if data:
# convert the byte string into a packet object
pkt = from_bytes_dint(data)
uuid = str(pkt.uuid)
# if this is a new client, give it a new image
if uuid not in frames.keys():
console.log(f"New client acquired, naming [bold cyan]{uuid}[bold cyan]")
frames[uuid] = Client()
frames[uuid].update(pkt)
if __name__ == "__main__": if __name__ == "__main__":
# argument parser # argument parser
parser = argparse.ArgumentParser(description="Proof-of-concept server for sauron-cv") parser = argparse.ArgumentParser(description="Proof-of-concept server for sauron-cv")
parser.add_argument("-p", "--port", type=int, default=5005) parser.add_argument("-p", "--port", type=int, default=5005)
parser.add_argument("-l", "--listen", type=str, default="") parser.add_argument("-l", "--listen", type=str, default="0.0.0.0")
parser.add_argument("-W", "--width", type=int, default=640) parser.add_argument("-W", "--width", type=int, default=640)
parser.add_argument("-H", "--height", type=int, default=480) parser.add_argument("-H", "--height", type=int, default=480)
args = parser.parse_args() args = parser.parse_args()
@@ -98,28 +95,18 @@ if __name__ == "__main__":
HEIGHT = args.height HEIGHT = args.height
WIDTH = args.width WIDTH = args.width
# create the UDP socket # create the async event loop
sock = socket.socket(socket.AF_INET, # Internet loop = asyncio.new_event_loop()
socket.SOCK_DGRAM) # UDP asyncio.set_event_loop(loop)
sock.setblocking(False)
sock.bind((UDP_IP, UDP_PORT))
console.log("Ready to accept connections.", style="bold green") # create async tasks for reading network packets, displaying windows
loop.create_task(listen(UDP_IP, UDP_PORT))
loop.create_task(show_frames())
try:
loop.run_forever()
finally:
loop.run_until_complete(loop.shutdown_asyncgens())
loop.close()
with console.status("[bold yellow]0[/bold yellow] clients connected.", spinner="pong") as stat: # Release the capture and close all windows
cv2.destroyAllWindows()
# create the async event loop
loop = asyncio.new_event_loop()
asyncio.set_event_loop(loop)
# create async tasks for reading network packets, displaying windows
loop.create_task(read_packet())
loop.create_task(show_frames())
try:
loop.run_forever()
finally:
loop.run_until_complete(loop.shutdown_asyncgens())
loop.close()
# Release the capture and close all windows
cv2.destroyAllWindows()

170
logging.h Normal file
View File

@@ -0,0 +1,170 @@
#ifndef LOGGING_H
#define LOGGING_H
#include <iostream>
#include <iomanip>
#include <chrono>
#include <ctime>
// Logging levels
#define LOG_ERROR 0
#define LOG_WARN 1
#define LOG_INFO 2
#define LOG_DEBUG 3
#define LOG_TRACE 4
// Set logging arguments
// TODO: set these by compiler arguments or env vars or something
#define LOGGING LOG_DEBUG // logging level
#define LOGGING_COLOR true // enable color
#define LOGGING_TIMESTAMP true // enable timestamp
#define LOGGING_TIMESTAMP_FMT "%Y-%m-%dT%H:%M:%S%z" // timestamp format (local time)
#define LOGGING_POSITION false // display position (only works on C++20 or newer)
// Color codes
#define ANSI_RESET "\033[0m"
#define ANSI_BLACK "\033[30m" /* Black */
#define ANSI_RED "\033[31m" /* Red */
#define ANSI_GREEN "\033[32m" /* Green */
#define ANSI_YELLOW "\033[33m" /* Yellow */
#define ANSI_BLUE "\033[34m" /* Blue */
#define ANSI_MAGENTA "\033[35m" /* Magenta */
#define ANSI_CYAN "\033[36m" /* Cyan */
#define ANSI_WHITE "\033[37m" /* White */
#define ANSI_BOLD "\033[1m" /* Bold */
// Stream every argument to stdout in order, then terminate the line.
template <typename... Args>
void print(Args... args) {
    (std::cout << ... << args);
    std::cout << "\n";
}
// Emit the current local time, formatted per LOGGING_TIMESTAMP_FMT and
// followed by ": ", as a log-line prefix. No-op when timestamps are disabled.
inline void printTimestamp() {
#if LOGGING_TIMESTAMP
    const std::time_t now_c =
        std::chrono::system_clock::to_time_t(std::chrono::system_clock::now());
    std::tm local_tm{};
    localtime_r(&now_c, &local_tm);  // thread-safe conversion to local time
    std::cout << std::put_time(&local_tm, LOGGING_TIMESTAMP_FMT) << ": ";
#endif
}
// if we're on C++20 or later, then use the source_location header and add source location to logs
#if __cplusplus >= 202002L && LOGGING_POSITION
#include <source_location>
// Emit "file:function:line: " for the given call site.
inline void printPosition(const std::source_location& location) {
    std::cout << location.file_name() << ":" << location.function_name() << ":" << ANSI_CYAN << location.line() << ANSI_RESET << ": ";
}
// Plain (colorless) log-line header: "[NAME] <timestamp>: <position>: ".
inline void printHeader(const std::string& name, const std::source_location& location) {
    std::cout << "[" << name << "] ";
    printTimestamp();
    printPosition(location);
}
// Colored log-line header; falls back to the plain header when color is off.
inline void printHeader(const std::string& name, const std::string& color, const std::source_location& location) {
#if LOGGING_COLOR
    std::cout << ANSI_BOLD << color << "[" << name << "] " << ANSI_RESET;
    printTimestamp();
    printPosition(location);
#else
    // BUG FIX: previously called printHeader(name), an overload that does not
    // exist in this branch; forward the call site along instead.
    printHeader(name, location);
#endif
}
// NOTE(review): the previous `void trace(Args... args, Sl location = ...)`
// form is not callable: a defaulted parameter after a function parameter pack
// cannot be used through deduction, so `trace("msg")` never compiled — which
// is presumably why LOGGING_POSITION is disabled. The standard workaround is
// a class template whose constructor defaults the source_location, plus a
// deduction guide so call syntax (`trace("msg");`) is unchanged.
template <typename... Args>
struct trace {
    explicit trace(Args... args, const std::source_location& location = std::source_location::current()) {
#if LOGGING >= LOG_TRACE
        printHeader("TRACE", ANSI_CYAN, location);
        print(args...);
#endif
    }
};
template <typename... Args>
trace(Args...) -> trace<Args...>;
template <typename... Args>
struct debug {
    explicit debug(Args... args, const std::source_location& location = std::source_location::current()) {
#if LOGGING >= LOG_DEBUG
        printHeader("DEBUG", ANSI_MAGENTA, location);
        print(args...);
#endif
    }
};
template <typename... Args>
debug(Args...) -> debug<Args...>;
template <typename... Args>
struct info {
    explicit info(Args... args, const std::source_location& location = std::source_location::current()) {
#if LOGGING >= LOG_INFO
        printHeader("INFO", ANSI_GREEN, location);
        print(args...);
#endif
    }
};
template <typename... Args>
info(Args...) -> info<Args...>;
template <typename... Args>
struct warn {
    explicit warn(Args... args, const std::source_location& location = std::source_location::current()) {
#if LOGGING >= LOG_WARN
        printHeader("WARN", ANSI_YELLOW, location);
        print(args...);
#endif
    }
};
template <typename... Args>
warn(Args...) -> warn<Args...>;
template <typename... Args>
struct error {
    explicit error(Args... args, const std::source_location& location = std::source_location::current()) {
#if LOGGING >= LOG_ERROR
        printHeader("ERROR", ANSI_RED, location);
        print(args...);
#endif
    }
};
template <typename... Args>
error(Args...) -> error<Args...>;
#else
// Plain (colorless) log-line header: "[NAME] <timestamp>: ".
// Declared BEFORE the colored overload so its fallback call resolves; the old
// ordering made the non-color branch a compile error (call before declaration).
inline void printHeader(std::string name) {
    std::cout << "[" << name << "] ";
    printTimestamp();
}
// Colored log-line header; falls back to the plain header when color is off.
inline void printHeader(std::string name, std::string color) {
#if LOGGING_COLOR
    std::cout << ANSI_BOLD << color << "[" << name << "] " << ANSI_RESET;
    printTimestamp();
#else
    printHeader(name);
#endif
}
// Log at TRACE level; compiled out entirely when LOGGING < LOG_TRACE.
template <typename... Args>
void trace(Args... args) {
#if LOGGING >= LOG_TRACE
    printHeader("TRACE", ANSI_CYAN);
    print(args...);
#endif
}
// Log at DEBUG level; compiled out entirely when LOGGING < LOG_DEBUG.
template <typename... Args>
void debug(Args... args) {
#if LOGGING >= LOG_DEBUG
    printHeader("DEBUG", ANSI_MAGENTA);
    print(args...);
#endif
}
// Log at INFO level; compiled out entirely when LOGGING < LOG_INFO.
template <typename... Args>
void info(Args... args) {
#if LOGGING >= LOG_INFO
    printHeader("INFO", ANSI_GREEN);
    print(args...);
#endif
}
// Log at WARN level; compiled out entirely when LOGGING < LOG_WARN.
template <typename... Args>
void warn(Args... args) {
#if LOGGING >= LOG_WARN
    printHeader("WARN", ANSI_YELLOW);
    print(args...);
#endif
}
// Log at ERROR level; compiled out entirely when LOGGING < LOG_ERROR.
template <typename... Args>
void error(Args... args) {
#if LOGGING >= LOG_ERROR
    printHeader("ERROR", ANSI_RED);
    print(args...);
#endif
}
#endif
#endif //LOGGING_H

37
meson.build Normal file
View File

@@ -0,0 +1,37 @@
#=======================================================================================================================
# PROJECT SETTINGS
#=======================================================================================================================
# C++20 is required by logging.h's (optional) std::source_location support.
project('video-streaming-poc', 'cpp', version : '0.0.1-SNAPSHOT',
default_options : ['c_std=c17', 'cpp_std=c++20'])
#=======================================================================================================================
# DEPENDENCIES
#=======================================================================================================================
# opencv dependency
opencv = dependency('opencv4', version : '>=4.0.0')
# Resolve OpenCV's include directory from CMake when OpenCV was found via
# CMake, otherwise fall back to pkg-config's `includedir` variable.
opencv_incl_dir = opencv.get_variable(cmake : 'OpenCV_INCLUDE_DIRECTORIES', pkgconfig : 'includedir')
include = include_directories(opencv_incl_dir)
# boost dependency
# NOTE(review): no modules requested, so this is header-only Boost; if a
# compiled Boost library is ever needed, add `modules : [...]` here.
boost = dependency('boost')
#=======================================================================================================================
# SOURCE FILES
#=======================================================================================================================
# common files between client / server (headers only; listed so changes trigger rebuilds)
common = ['transfer.h', 'logging.h']
# client-only files
client = common + ['client.cpp']
# server-only files
server = common + ['server.cpp']
#=======================================================================================================================
# BUILD TARGETS
#=======================================================================================================================
# client executable
client_exe = executable('client', client,
dependencies : [opencv, boost],
include_directories : include)
# server executable
server_exe = executable('server', server,
dependencies : [opencv, boost],
include_directories : include)

56
server.cpp Normal file
View File

@@ -0,0 +1,56 @@
#include <opencv2/highgui.hpp>
#include <opencv2/core/mat.hpp>
#include <opencv2/imgproc.hpp>
#include <cstring>
#include <iostream>
#include <netinet/in.h>
#include <sys/socket.h>
#include <unistd.h>
#include "transfer.h"
#include "logging.h"
#include <boost/asio.hpp>
using namespace std;
using boost::asio::ip::tcp;
int main() {
    try {
        boost::asio::io_context io_context;
        // listen for one client on TCP port 8080, any interface
        tcp::acceptor acceptor(io_context, tcp::endpoint(tcp::v4(), 8080));
        tcp::socket socket(io_context);
        info("Ready to accept connections.");
        // block until a single client connects
        acceptor.accept(socket);
        info("Client connected.");
        // TODO: handle multiple images
        cv::Mat image = cv::Mat::zeros(cv::Size(640, 480), CV_8UC3);
        // receive buffer hoisted out of the loop so its capacity is reused
        // across frames instead of being reallocated every iteration
        vector<uchar> buffer;
        bool running = true;
        // TODO: make this asynchronous. probably do that in tandem with setting up networking
        while (running) {
            trace("Receiving image");
            int latency = recvImage(socket, buffer);
            trace("Applying new data to image");
            applyImage(image, &buffer);
            // overlay the measured latency (ms) near the bottom-left corner
            cv::putText(image, to_string(latency), cv::Point(0, 480), cv::FONT_HERSHEY_PLAIN, 1, cv::Scalar(255, 0, 0), 1, cv::LINE_AA);
            trace("Displaying image");
            imshow("image", image);
            // ESC (key code 27) exits the display loop
            running = cv::waitKey(30) != 27;
        }
    }
    catch (std::exception& e) {
        // any accept / receive / decode failure lands here; log and exit
        error(e.what());
    }
    return 0;
}

108
transfer.h Normal file
View File

@@ -0,0 +1,108 @@
#ifndef TRANSFER_H
#define TRANSFER_H
#include <chrono>
#include <opencv2/core/mat.hpp>
#include <opencv2/imgcodecs.hpp>
#include <sys/socket.h>
#include "logging.h"
#include <vector>
#include <boost/asio.hpp>
using namespace std;
using boost::asio::ip::tcp;
// Fixed-size frame header transmitted before each JPEG payload.
// NOTE(review): sendImage/recvImage copy this struct to the wire byte-for-byte,
// so both peers must agree on sizeof(size_t), padding, and endianness —
// presumably both ends run on the same architecture; confirm before mixing
// platforms.
struct imageHeader {
    size_t size; // byte length of the JPEG payload that follows
    chrono::milliseconds timestamp; // sender's wall-clock send time (ms since epoch)
};
// Current wall-clock time as milliseconds since the Unix epoch.
// Used to timestamp outgoing frames so the receiver can compute latency
// (assumes sender and receiver clocks are roughly synchronized).
inline std::chrono::milliseconds getMillis() {
    const auto now = std::chrono::system_clock::now();
    // duration_cast already yields chrono::milliseconds — no need to rebuild
    // the duration from its raw tick count as the old code did
    return std::chrono::duration_cast<std::chrono::milliseconds>(now.time_since_epoch());
}
inline void serializeImage(const cv::Mat& image, std::vector<uchar>& buffer) {
cv::imencode(".jpg", image, buffer);
}
// Send one frame over the socket: a fixed-size imageHeader (payload size +
// send timestamp) followed by the JPEG-encoded image bytes.
// `buffer` is a caller-owned scratch buffer reused across frames.
// Throws boost::system::system_error on any socket failure.
void sendImage(tcp::socket& socket, const cv::Mat& image, std::vector<uchar>& buffer) {
    boost::system::error_code e;
    serializeImage(image, buffer);
    // build the header describing the payload
    imageHeader header;
    header.size = buffer.size();
    header.timestamp = getMillis();
    trace("Buffer size: ", header.size);
    // send the header. BUG FIX: boost::asio::write returns a size_t byte
    // count and never -1, so the old `sent == -1` checks could not fire;
    // failures are reported through the error_code, which is what we check.
    boost::asio::write(socket, boost::asio::buffer(&header, sizeof(header)), e);
    if (e) {
        throw boost::system::system_error(e);
    }
    // send the payload. boost::asio::write only returns once the entire
    // buffer has been transmitted (or an error occurred), so the old manual
    // resend loop was redundant.
    const size_t sent = boost::asio::write(socket, boost::asio::buffer(buffer), e);
    if (e) {
        throw boost::system::system_error(e);
    }
    debug("Packet sent (", sent, " / ", header.size, " bytes)");
}
// Receive one frame: read the fixed-size imageHeader, then exactly
// header.size bytes of JPEG payload into `buffer` (resized to fit).
// Returns the measured latency in milliseconds (receive time minus the
// sender's embedded timestamp; meaningful only if clocks are in sync).
// Throws boost::system::system_error on any socket failure, including EOF.
int recvImage(tcp::socket& socket, std::vector<uchar>& buffer) {
    boost::system::error_code e;
    // Read the complete header in one call. BUG FIX: the old read_some loop
    // re-read into the START of the header on every partial read, corrupting
    // any bytes already received; boost::asio::read blocks until all
    // sizeof(header) bytes have arrived.
    imageHeader header;
    boost::asio::read(socket, boost::asio::buffer(&header, sizeof(header)), e);
    if (e) {
        throw boost::system::system_error(e);
    }
    const size_t dataSize = header.size;
    trace("Buffer size: ", dataSize);
    // resize the buffer to fit the whole image, then read exactly that much;
    // boost::asio::read fills the entire buffer or sets the error code
    buffer.resize(dataSize);
    const size_t received = boost::asio::read(socket, boost::asio::buffer(buffer), e);
    if (e) {
        // covers EOF (peer closed the connection) and all other errors
        throw boost::system::system_error(e);
    }
    debug("Packet received (", received, " / ", dataSize, " bytes)");
    const std::chrono::milliseconds diff = getMillis() - header.timestamp;
    debug("Packet latency: ", diff.count(), "ms");
    // explicit narrowing: latency in ms comfortably fits an int in practice
    return static_cast<int>(diff.count());
}
// Decode the JPEG bytes in *src into `image`.
// Returns whether decoding produced a non-empty image. BUG FIX: the old
// version unconditionally returned true, so callers could never detect a
// corrupt / truncated payload; cv::imdecode returns an empty Mat on failure.
bool applyImage(cv::Mat& image, std::vector<uchar> *src) {
    const cv::Mat decoded = cv::imdecode(*src, cv::IMREAD_UNCHANGED, &image);
    return !decoded.empty();
}
#endif //TRANSFER_H