.NET HttpClient extensibility: Support for UnixDomain Sockets & Named Pipes

.NET HttpClient is the main interface for sending HTTP requests and receiving HTTP responses from a resource identified by a URI. Traditionally this API supported only the TCP stack. In .NET 5, the library was extended to support other transports such as Unix Domain Sockets and Named Pipes, because many web servers on Unix provide HTTP server implementations over Unix Domain Sockets. Windows also added Unix domain socket support in Windows 10 (for more details see here).

The ASP.NET Core web server (Kestrel) also added support for Unix domain sockets and named pipes (from .NET 8 onwards).

An alternative transport option was introduced in HttpClient's SocketsHttpHandler. With this it is possible to connect to a server over a Unix Domain Socket or a Named Pipe.

HttpClient with Unix Domain Socket

SocketsHttpHandler socketsHttpHandler = new SocketsHttpHandler();
// Custom connection callback that connects to the Docker daemon's Unix Domain Socket.
// The callback must return an open Stream; SocketsHttpHandler then speaks HTTP over it.
socketsHttpHandler.ConnectCallback = async (sockHttpConnContext, ctxToken) =>
{
    Uri dockerEngineUri = new Uri("unix:///var/run/docker.sock");
    var socket = new Socket(AddressFamily.Unix, SocketType.Stream, ProtocolType.IP);
    try
    {
        var endpoint = new UnixDomainSocketEndPoint(dockerEngineUri.AbsolutePath);
        await socket.ConnectAsync(endpoint, ctxToken);
        // ownsSocket: true — disposing the NetworkStream also closes the socket.
        return new NetworkStream(socket, ownsSocket: true);
    }
    catch
    {
        // Dispose the socket when the connect fails or is canceled; otherwise it leaks.
        socket.Dispose();
        throw;
    }
};

// create HttpClient with SocketsHttpHandler
var httpClient  = new HttpClient(socketsHttpHandler);
// make Http Request .

HttpClient with Named Pipe

SocketsHttpHandler socketsHttpHandler = new SocketsHttpHandler();
// Custom connection callback that connects to the Docker daemon's Named Pipe server.
socketsHttpHandler.ConnectCallback = async (sockHttpConnContext, ctxToken) =>
{
    Uri dockerEngineUri = new Uri("npipe://./pipe/docker_engine");
    // Segments of "npipe://./pipe/docker_engine" are "/", "pipe/", "docker_engine";
    // Segments[2] is therefore the pipe name, Host is the pipe server (".").
    NamedPipeClientStream pipeClientStream = new NamedPipeClientStream(dockerEngineUri.Host,
                                            dockerEngineUri.Segments[2],
                                            PipeDirection.InOut, PipeOptions.Asynchronous);
    try
    {
        await pipeClientStream.ConnectAsync(ctxToken);
        return pipeClientStream;
    }
    catch
    {
        // Dispose the pipe stream when the connect fails or is canceled to avoid a handle leak.
        pipeClientStream.Dispose();
        throw;
    }
};

// create HttpClient with SocketsHttpHandler
var httpClient  = new HttpClient(socketsHttpHandler);
// make Http Request .
        

A complete demonstration sample that uses HttpClient to connect to the Docker daemon, which serves its REST API via a Named Pipe on Windows and a Unix Domain Socket on Linux.


using System.IO.Pipes;
using System.Net.Http;
using System.Net.Http.Json;
using System.Net.Sockets;
using System.Runtime.InteropServices;

HttpClient client = CreateHttpClientConnectionToDockerEngine();
String dockerUrl = "http://localhost/v1.41/containers/json";
// Await directly (top-level statements support await) instead of blocking with
// GetAwaiter().GetResult(), which risks thread-pool starvation.
var containers = await client.GetFromJsonAsync<List<Container>>(dockerUrl);
Console.WriteLine("Container List:...");
// GetFromJsonAsync returns null when the response body is the JSON literal "null".
foreach (var item in containers ?? new List<Container>())
{
    Console.WriteLine(item);
}


// Create HttpClient to Docker Engine using NamedPipe & UnixDomain
// Create an HttpClient connected to the Docker Engine: a Named Pipe on Windows,
// a Unix Domain Socket everywhere else.
HttpClient CreateHttpClientConnectionToDockerEngine()
{
    SocketsHttpHandler socketsHttpHandler =
        RuntimeInformation.IsOSPlatform(OSPlatform.Windows) switch
        {
            true => GetSocketHandlerForNamedPipe(),
            false => GetSocketHandlerForUnixSocket(),
        };
    return new HttpClient(socketsHttpHandler);

    // Local function: handler whose connections go over the "docker_engine"
    // Named Pipe (Docker Desktop on Windows).
    static SocketsHttpHandler GetSocketHandlerForNamedPipe()
    {
        Console.WriteLine("Connecting to Docker Engine using Named Pipe:");
        SocketsHttpHandler socketsHttpHandler = new SocketsHttpHandler();
        // Custom connection callback that connects to the Named Pipe server.
        socketsHttpHandler.ConnectCallback = async (sockHttpConnContext, ctxToken) =>
        {
            Uri dockerEngineUri = new Uri("npipe://./pipe/docker_engine");
            // Segments are "/", "pipe/", "docker_engine"; Segments[2] is the pipe name.
            NamedPipeClientStream pipeClientStream = new NamedPipeClientStream(dockerEngineUri.Host,
                                                    dockerEngineUri.Segments[2],
                                                    PipeDirection.InOut, PipeOptions.Asynchronous);
            try
            {
                await pipeClientStream.ConnectAsync(ctxToken);
                return pipeClientStream;
            }
            catch
            {
                // Avoid leaking the pipe handle when the connect fails or is canceled.
                pipeClientStream.Dispose();
                throw;
            }
        };
        return socketsHttpHandler;
    }

    // Local function: handler whose connections go over the Docker daemon's
    // Unix Domain Socket (/var/run/docker.sock).
    static SocketsHttpHandler GetSocketHandlerForUnixSocket()
    {
        Console.WriteLine("Connecting to Docker Engine using Unix Domain Socket:");
        SocketsHttpHandler socketsHttpHandler = new SocketsHttpHandler();
        // Custom connection callback that connects to the Unix Domain Socket.
        socketsHttpHandler.ConnectCallback = async (sockHttpConnContext, ctxToken) =>
        {
            Uri dockerEngineUri = new Uri("unix:///var/run/docker.sock");
            var socket = new Socket(AddressFamily.Unix, SocketType.Stream, ProtocolType.IP);
            try
            {
                var endpoint = new UnixDomainSocketEndPoint(dockerEngineUri.AbsolutePath);
                await socket.ConnectAsync(endpoint, ctxToken);
                // ownsSocket: true so disposing the NetworkStream closes the socket.
                return new NetworkStream(socket, ownsSocket: true);
            }
            catch
            {
                // Avoid leaking the socket when the connect fails or is canceled.
                socket.Dispose();
                throw;
            }
        };
        return socketsHttpHandler;
    }
}

/// <summary>
/// Record class to hold the container information returned by the Docker
/// REST API /containers/json endpoint (property names match the JSON payload).
/// </summary>
/// <param name="Names">Names assigned to the container.</param>
/// <param name="Image">Name of the image the container was created from.</param>
/// <param name="ImageID">ID of the image the container was created from.</param>
/// <param name="Command">Command the container runs.</param>
/// <param name="State">Container state, e.g. "running".</param>
/// <param name="Status">Human-readable status text.</param>
/// <param name="Created">Creation time — presumably Unix epoch seconds; confirm against the Docker API reference.</param>
public record Container(List<String> Names, String Image, String ImageID,
                        String Command, String State, String Status, 
                        int Created);

Conclusion:

.NET HttpClient extensibility allows connecting to HTTP server implemented using non TCP transport such as Unix Domain Socket & Named Pipes.

Reference:

SocketsHttpHandler Extension Points

Docker Engine REST API Reference

Java Virtual Threads for high-performance Java Applications

Java virtual threads are a new type of thread that was introduced in Java 19 as a preview feature and is now available as a standard feature in Java 21. Virtual threads are managed by the JVM and do not require an equivalent number of platform threads or operating system threads. This means that virtual threads are much more lightweight than traditional threads and can be used to scale applications to thousands or even millions of concurrent tasks.

Virtual threads are especially well-suited for applications that perform a lot of I/O, such as web servers, database applications, and microservices. This is because virtual threads can be suspended while waiting for I/O to complete, without blocking the underlying platform thread or OS thread. This allows the JVM to continue running other virtual threads on the same platform thread, which can significantly improve performance

How to create virtual threads: ( All the samples are compiled & ran on Java 21)

        // various ways of creating virtual threads
        // NOTE: STR."..." is a String Template (preview feature in Java 21);
        // these samples require compiling/running with --enable-preview.

        {
            // example 1
            // Thread.startVirtualThread creates AND starts the virtual thread.
            var runningThreead = Thread.startVirtualThread(() -> {
                //Code to execute in virtual thread
                System.out.println(STR. "Is Virtual Thread: \{ Thread.currentThread().isVirtual() } ,Name:\{Thread.currentThread() }" );
            });
            runningThreead.join();
        }
        {
            // example 2
            // Thread.ofVirtual() returns a builder; start(runnable) launches the thread.
            Runnable runnable = () -> System.out.println(STR. "Is Virtual Thread: \{ Thread.currentThread().isVirtual() } ,Name:\{Thread.currentThread() }" );
            Thread virtualThread = Thread.ofVirtual().start(runnable);
            virtualThread.join();
        }
        {
             // example 3
            // Using Executors.newVirtualThreadPerTaskExecutor()
            // The executor is AutoCloseable; close() waits for submitted tasks to finish.
            try (var executor = Executors.newVirtualThreadPerTaskExecutor()) {
                    executor.submit(() -> {
                        System.out.println(STR. "Is Virtual Thread: \{ Thread.currentThread().isVirtual() } ,Name:\{Thread.currentThread()}" );
                        try {
                            Thread.sleep(Duration.ofSeconds(1));
                        } catch (InterruptedException e) {
                            throw new RuntimeException(e);
                        }
                    });

            }
        }

The following example demonstrates the use of Java Virtual Threads in a simple TCP Echo server & client implementation.

An Echo server that uses Executors.newVirtualThreadPerTaskExecutor() to handle a large number of concurrent connections without creating a large number of platform/OS threads.

package org.example;

import java.io.BufferedReader;
import java.io.IOException;
import java.io.InputStreamReader;
import java.io.PrintWriter;
import java.net.ServerSocket;
import java.net.Socket;
import java.util.concurrent.CountDownLatch;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import java.util.logging.ConsoleHandler;
import java.util.logging.Logger;

public class EchoServer {
    // Logger for the whole server; final since it is never reassigned.
    private static final Logger logger = Logger.getLogger("EchoServer");

    public static void main(String[] args) {
        CountDownLatch countDownLatch = new CountDownLatch(1);
        // Capture shutdown requests from the Virtual Machine.
        // This can occur when a user types Ctrl+C at the console.
        Runtime.getRuntime().addShutdownHook(new Thread() {
            @Override
            public void run() {
                countDownLatch.countDown();
            }
        });

        try (var serverSocket = new ServerSocket(4444);
             var executors = Executors.newVirtualThreadPerTaskExecutor()) {
            logger.info("Accepting incoming connections on port " + serverSocket.getLocalPort());
            // NOTE(review): accept() blocks, so the latch is only re-checked after
            // each new connection; Ctrl+C while blocked ends the JVM directly.
            while (countDownLatch.getCount() != 0) {
                var clientSocket = serverSocket.accept();
                logger.info("Accepted connection from " + clientSocket.getRemoteSocketAddress());
                // submit Runnable task to the virtual-thread-per-task executor
                executors.submit(new ClientHandler(clientSocket));
            }
            // try-with-resources closes serverSocket; the explicit close() was redundant.
            logger.info("Stopped accepting incoming connections.");
        } catch (IOException e) {
            throw new RuntimeException(e);
        }

    }

    /**
     * Handles one connected client on a virtual thread: echoes each line
     * back upper-cased; a lone "." ends the session with "bye".
     */
    public static class ClientHandler implements Runnable {

        private final Socket clientSocket;

        public ClientHandler(Socket clientSocket) {
            this.clientSocket = clientSocket;
        }

        @Override
        public void run() {
            try (var out = new PrintWriter(clientSocket.getOutputStream(), true);
                 var in = new BufferedReader(
                         new InputStreamReader(clientSocket.getInputStream()))) {

                String inputLine;
                while ((inputLine = in.readLine()) != null) {
                    if (".".equals(inputLine)) {
                        out.println("bye");
                        break;
                    }
                    out.println(inputLine.toUpperCase());
                }
            } catch (IOException e) {
                EchoServer.logger.warning("Lost connection to " + this.clientSocket.getRemoteSocketAddress());
            } finally {
                // close the client socket using try-with-resources
                try (clientSocket) {
                    EchoServer.logger.info("Closing  connection to " + this.clientSocket.getRemoteSocketAddress());
                } catch (IOException e) {
                    EchoServer.logger.severe("Exception while closing the connection  " + this.clientSocket.getRemoteSocketAddress());
                }
            }

        }
    }
}

A test application that creates a large number of connections to the EchoServer.

package org.example;

import java.io.BufferedReader;
import java.io.IOException;
import java.io.InputStreamReader;
import java.io.PrintWriter;
import java.net.Socket;
import java.time.Duration;
import java.time.Instant;
import java.util.concurrent.CountDownLatch;
import java.util.concurrent.Executors;
import java.util.logging.Logger;
import java.util.stream.IntStream;

public class TestApplication {
    // Logger for the client app; final since it is never reassigned.
    private static final Logger logger = Logger.getLogger("SimpleEchoClient");

    public static void main(String[] args) {
        int numberOfClients = 100;
        int numberOfMessagePerClient = 100;
        String ipAddressOrHostName = "localhost";
        int port = 4444;
        // The executor is AutoCloseable: close() (implicit at the end of the
        // try block) waits until all submitted client tasks finish.
        try (var executors = Executors.newVirtualThreadPerTaskExecutor()) {
            IntStream.range(0, numberOfClients).forEach(idx -> {
                executors.submit(new EchoClient(ipAddressOrHostName, port, numberOfMessagePerClient));
            });

        }
        logger.info("Done");

    }

    /**
     * Client that connects to the EchoServer and sends a fixed number of
     * messages, logging each echoed response.
     */
    public static class EchoClient implements Runnable {
        private final String ip;
        private final int port;
        private final int numberOfMessages;

        public EchoClient(String ip, int port, int numberOfMessages) {
            this.ip = ip;
            this.port = port;
            this.numberOfMessages = numberOfMessages;
        }

        @Override
        public void run() {
            try (var clientSocket = new Socket(ip, port);
                 var out = new PrintWriter(clientSocket.getOutputStream(), true);
                 var in = new BufferedReader(new InputStreamReader(clientSocket.getInputStream()))) {
                for (int index = 0; index < numberOfMessages; index++) {
                    // "messsage" typo fixed in the outgoing payload.
                    out.println("Random message " + Instant.now().toString());
                    String resp = in.readLine();
                    logger.info("[Msg from server] " + resp);
                    // introduce delay to simulate I/O-bound work
                    Thread.sleep(Duration.ofMillis(500));
                }
            } catch (IOException e) {
                logger.severe("Unable to connect to remote client or Unable to read/write to connected client " + e.getMessage());
            } catch (InterruptedException e) {
                logger.severe("InterruptedException" + e.getMessage());
            }
        }
    }
}

Conclusion:

Java virtual threads are a powerful new feature that can be used to write scalable and efficient concurrent applications. Virtual threads are easy to use and do not require any special programming techniques. If you are developing a Java application that needs to handle a large number of concurrent tasks, then you should consider using virtual threads.

Reference:

Java 21 new feature: Virtual Threads

Beyond Loom: Weaving new concurrency patterns

Java Virtual Threads

JEP 444: Virtual Threads

Accessing Docker API from .NET Core

Recently I had to access container stats (CPU & memory) for the container apps on a Linux system with limited access (only ssh access via multiple intermediate jump servers). We could not install cAdvisor or other UI tools. I tried capturing container stats by running the docker stats command every 5 seconds, but its output was less than ideal for analysis like plotting memory/CPU graphs.

So I decided to write a simple tool using the Docker.DotNet C# SDK. The requirement was to write a self-contained utility that can run on Linux and log container stats entries in comma-separated format (CSV) for easier analysis using Excel.

This tool uses Docker.DotNet , a C# SDK that uses Docker REST API to interact with Docker Daemon and .NET self contained publish profile that produces a platform-specific executable.

Complete source code is available on my GitHub repo ,

Here is the sample run & the output

The output format is as follows: ContainerName,CpuPercent,MemUsageInKB,MaxAviMemInKB,MemUsageInPercent,DateTime

Conclusion

The Docker API is a powerful tool that allows you to control Docker from your own applications. By accessing the Docker API from C#, you can automate Docker operations, create your own Docker tools, and integrate Docker with other systems.

Reference

Using MQTT to Deliver Messages to Software systems behind private network.

MQTT (Message Queuing Telemetry Transport) is a lightweight messaging protocol that is well-suited for delivering messages to software systems behind a private network/firewall. MQTT uses a publish-subscribe pattern, where software systems/devices publish messages to topics and subscribers can subscribe to topics to receive messages. This pattern is ideal for delivering messages to software behind a private network because it only requires the software to open a single outbound connection to the MQTT broker running on the internet.

Usecase for reaching into software systems running in the private network

  • Push configuration changes from the Cloud.
  • Change the log verbosity ( info->debug) of a software to debug software issues.
  • On demand access to performances of a software ( CPU/Memory/Network).
  • Initiate command/control from cloud.

To use MQTT to deliver messages to software behind a private network/firewall, you will need to:

  1. Set up an MQTT broker outside of the private network. This broker can be hosted on a public cloud platform, such as AWS IoT Core or Azure IoT Hub, or on a private server
  2. Configure MQTT broker to accept TCP or WebSocket connection.
  3. Configure the MQTT client to connect to the MQTT broker via TCP or WebSocket. This will typically involve specifying the broker’s hostname or IP address, port number, and authentication credentials.
  4. Subscribe to the topics that you want to receive messages from.

Here are some of the benefits of using MQTT

  • MQTT is lightweight and efficient: MQTT uses a binary message format and a publish/subscribe model, which makes it very efficient for delivering messages to a large number of clients.
  • MQTT is reliable: MQTT has built-in mechanisms for ensuring that messages are delivered reliably, even in the event of network outages or other disruptions.
  • MQTT is secure: MQTT can be used to deliver messages securely by encrypting the messages.

The following diagram shows a software component (background service, console app) running inside a private network that can receive messages from an MQTT broker running on the internet.

Complete source code of the software implementing some of the use case can be found on my GitHub repository

Conclusion

MQTT is a powerful messaging protocol that can be used to deliver messages to software behind a private network or firewall.

Reference

Load Balanced DICOM Classic CStore Service deployed on Docker

This post covers the demonstration setup of DICOM CStore service with load balancer that supports scaling of DICOM request across multiple instance.

DICOM Classic CStore service is a DICOM image transfer service that allows DICOM Application Entities (clients) to send DICOM objects to a DICOM storage server. The CStore service is based on the DICOM Imaging Network Communication Standard and is defined in Part 7 of the DICOM Standard. DICOM is an application protocol built on top of the TCP/IP stack.

The CStore service is widely used in hospital radiology, where it is typically used to store DICOM images from medical imaging devices, such as MRI scanners and CT scanners.

The CStore service is a request/response service, which means that the sending AE ( client) sends a request to the receiving AE(server) to store a DICOM object. The receiving AE then sends a response to the sending AE indicating whether the DICOM object was successfully stored.

Following example demonstrate the setup where multiple instances of CStore service deployed in Docker load balanced using Nginx Proxy .

Complete source code & deployment script on my GitHub repo

Reference

DICOM Standard

Java DICOM Library Reference

Using host.docker.internal to access the Docker host from inside a container

Docker containers are isolated from the host machine, which means that they cannot access the host machine’s network by default. However, there are ways to allow containers to access the host machine’s network, one of which is to use the host.docker.internal hostname.

host.docker.internal is a special hostname that resolves to the IP address of the Docker host machine. This means that containers can access the host machine’s network by connecting to host.docker.internal.

One common use case for using host.docker.internal is to allow a container to access a service ex: database that is running on the host machine. For example, you might have a container that is running a web application that needs to access a MySQL database that is running on the host machine as shown in the diagram below. By configuring web application to use host.docker.internal with port number , application can reach the database service.

Below example shows connecting to service running on host from inside the container on Docker desktop running on Windows

On Windows Docker installation accessing host.docker.internal is automatic, on Linux this requires adding --add-host command line option for docker run command .

docker run --rm -it --add-host=host.docker.internal:host-gateway <<container>>

Conclusion:

host.docker.internal is a useful tool for accessing services running on the host machine from inside a container. It is easy to use, portable, and reliable.

Reference

Docker Networking Documentation

Docker Run command line options

Docker Init CLI with ASP.NET Core application

Docker Init CLI is a new command-line interface (CLI) command that simplifies the process of adding Docker to a project. It can be used to generate a Dockerfile and Docker Compose file for an ASP.NET Core application, as well as build and run the application in a container.

To use Docker Init CLI with an ASP.NET Core application, first install the Docker CLI and Docker Compose or install Docker Desktop 4.23 or greater Then, navigate to the directory of your ASP.NET Core application and run the following command:

docker init

Based on your current project settings, cli will prompt for series of questions, you can choose to use defaults and the end it will generate a Dockerfile and Docker Compose file in the current directory.

To build the Docker image for your ASP.NET Core application, run the following command:

docker build -t aspnetapp .

This will build the Docker image and tag it with the name aspnetapp.

To run the ASP.NET Core application in a container, use docker run or docker-compose up

docker-compose up -d

This will start the Docker Compose stack, which will create and start a container for your ASP.NET Core application. You can now access the ASP.NET Core application at http://localhost:8080 in your web browser.

Conclusion

Docker Init CLI makes it easy to add Docker to an ASP.NET Core project. It generates a Dockerfile and Docker Compose file for you, so you don’t have to write them yourself.

Reference

Docker Init Cli command Reference

Docker Desktop 4.24

Unix domain socket support in Windows

UNIX domain sockets enable efficient inter-process communication between processes running on the same machine. A UNIX domain socket uses a file pathname to identify the server instead of an IP address and port. Support for UNIX domain sockets has existed on many flavors of UNIX & Linux for the longest time, but this facility was not available on Windows. On Windows, if you wanted to do local IPC, you had to use named pipes. Named pipes have different APIs than sockets; this difference made porting UNIX domain socket based applications to Windows difficult.

Windows 10 Version 1803 brings native support of UNIX domain sockets to Windows. You can program in C with Windows SDK version 1803 (10.0.17134 or greater) and in C# using .NET Core 2.1.

Here is how to check support for UNIX domain sockets on Windows.

“sc query afunix” from a Windows admin command prompt

image

Here is the C#  demo sample


using System;
using System.IO;
using System.Net.Sockets;
using System.Text;
using System.Threading;
using System.Threading.Tasks;
namespace UnixSocketsDemo
{
    /// <summary>
    /// Demonstrates UNIX domain socket support on Windows (1803+):
    /// starts an echo server on a temporary socket path, then runs an
    /// interactive client that receives each line upper-cased.
    /// </summary>
    class UnixSocketsOnWindows
    {
        public static void Demo()
        {
            // Random file name in the temp directory serves as the socket path.
            string path = Path.Combine(Path.GetTempPath(), Path.GetRandomFileName());
            var task = Task.Run(() =>
            {
                StartServer(path);
            });
            // wait for server to start
            Thread.Sleep(2000);
            StartClient(path);
            Console.ReadLine();
        }

        private static void StartClient(String path)
        {
            var endPoint = new UnixDomainSocketEndPoint(path);
            try
            {
                using (var client = new Socket(AddressFamily.Unix, SocketType.Stream, ProtocolType.Unspecified))
                {
                    client.Connect(endPoint);
                    Console.WriteLine($"[Client] Connected to … ..{path}");
                    String str = String.Empty;
                    var bytes = new byte[100];
                    // Keep echoing until the user types "exit".
                    while (!str.Equals("exit", StringComparison.InvariantCultureIgnoreCase))
                    {
                        Console.WriteLine("[Client]Enter something: ");
                        var line = Console.ReadLine();
                        client.Send(Encoding.UTF8.GetBytes(line));
                        Console.Write("[Client]From Server: ");
                        int byteRecv = client.Receive(bytes);
                        str = Encoding.UTF8.GetString(bytes, 0, byteRecv);
                        Console.WriteLine(str);
                    }
                }
            }
            finally
            {
                // Best-effort cleanup of the socket file left on disk.
                try { File.Delete(path); }
                catch { }
            }
        }

        private static void StartServer(String path)
        {
            var endPoint = new UnixDomainSocketEndPoint(path);
            try
            {
                using (var server = new Socket(AddressFamily.Unix, SocketType.Stream, ProtocolType.Unspecified))
                {
                    server.Bind(endPoint);
                    Console.WriteLine($"[Server] Listening … ..{path}");
                    server.Listen(1);
                    using (Socket accepted = server.Accept())
                    {
                        Console.WriteLine("[Server]Connection Accepted …" + accepted.RemoteEndPoint.ToString());
                        var bytes = new byte[100];
                        while (true)
                        {
                            int byteRecv = accepted.Receive(bytes);
                            // Receive returns 0 when the peer has closed the connection;
                            // without this check the loop spins forever on a dead socket.
                            if (byteRecv == 0)
                            {
                                break;
                            }
                            String str = Encoding.UTF8.GetString(bytes, 0, byteRecv);
                            Console.WriteLine("[Server]Received " + str);
                            accepted.Send(Encoding.UTF8.GetBytes(str.ToUpper()));
                        }
                    }
                }
            }
            finally
            {
                // Best-effort cleanup of the socket file left on disk.
                try { File.Delete(path); }
                catch { }
            }
        }
    }

    class Program
    {
        static void Main(string[] args)
        {
            Console.WriteLine("Hello World!");
            // Bug fix: the demo was never invoked; Main previously only printed a greeting.
            UnixSocketsOnWindows.Demo();
        }
    }
}

 

Resource