Compare commits

...

10 Commits

b984f5ca08  2025-11-20 00:18:58 +01:00
    Containerized build and socket activation docs

553d8659b7  2025-11-20 00:18:07 +01:00
    Corrected implementation using CoreOS systemd library

86f433c0c2  2025-11-19 23:28:04 +01:00
    Add systemd socket support; incomplete but works

86e9b93c37  Ryan Steed  2025-11-17 10:43:47 -08:00
    proxy,ui: add version endpoint and display version info in UI (#395)
    - Add /api/version endpoint to ProxyManager that returns build date, commit hash, and version
    - Implement SetVersion method to configure version info in ProxyManager
    - Add version info fetching to APIProvider and display in ConnectionStatus component
    - Include version info in UI context and update dependencies
    - Add tests for version endpoint functionality

3acace810f  Ryan Steed  2025-11-16 10:21:59 -08:00
    proxy: add configurable logging timestamp format (#401)
    Introduces a new configuration option, logTimeFormat, that allows customizing the
    timestamp in log messages using Go's built-in time format constants. The default
    remains no timestamp.

554d29e87d  Ryan Steed  2025-11-15 14:35:26 -08:00
    feat: enhance model listing to include aliases (#400)
    Introduces includeAliasesInList, a new configuration setting (default: false) that
    includes aliases in v1/models.
    Fixes #399

3567b7df08  Benson Wong  2025-11-08 15:29:37 -08:00
    Update image in README.md for web UI section

38738525c9  Benson Wong  2025-11-08 15:08:55 -08:00
    config.example.yaml: add modeline for schema validation

c0fc858193  Benson Wong  2025-11-08 15:04:14 -08:00
    Add configuration file JSON schema (#393)
    * add json schema for configuration
    * add GH action to validate schema

b429349e8a  Benson Wong  2025-11-08 14:16:12 -08:00
    add /ui/ to wol-proxy polling (#388)
22 changed files with 809 additions and 54 deletions

.github/workflows/config-schema.yml (new file, +41)

@@ -0,0 +1,41 @@
name: Validate JSON Schema

on:
  pull_request:
    paths:
      - "config-schema.json"
  push:
    branches:
      - main
    paths:
      - "config-schema.json"
  workflow_dispatch:

jobs:
  validate-schema:
    runs-on: ubuntu-latest
    steps:
      - name: Checkout code
        uses: actions/checkout@v4

      - name: Validate JSON Schema
        run: |
          # Check if the file is valid JSON
          if ! jq empty config-schema.json 2>/dev/null; then
            echo "Error: config-schema.json is not valid JSON"
            exit 1
          fi

          # Validate that it's a valid JSON Schema
          # Check for required $schema field
          if ! jq -e '."$schema"' config-schema.json > /dev/null; then
            echo "Warning: config-schema.json should have a \$schema field"
          fi

          # Check that it has either properties or definitions
          if ! jq -e '.properties or .definitions or ."$defs"' config-schema.json > /dev/null; then
            echo "Warning: JSON Schema should contain properties, definitions, or \$defs"
          fi

          echo "✓ config-schema.json is valid"

Build.Containerfile (new file, +60)

@@ -0,0 +1,60 @@
# Stage 1: Build UI with Node.js
FROM node:25.2.1-trixie-slim AS ui-builder
WORKDIR /app
# Copy UI source
COPY ui/package*.json ./
RUN npm install -g typescript && npm ci --only=production
RUN npm install --save-dev @types/react @types/react-dom
# Build UI
COPY ui/ ./
RUN npm run build
# Stage 2: Build Go binary with embedded UI
FROM golang:1.25.4 AS go-builder
WORKDIR /app
# Copy go mod and sum files
COPY go.mod go.sum ./
# Download dependencies
RUN go mod download
# Copy all source code
COPY . .
# Copy UI build artifacts to embed directory
#COPY --from=ui-builder /app/build ./ui/build
COPY --from=ui-builder /proxy/ui_dist ./proxy/ui_dist
# Build the binary (Linux AMD64)
RUN CGO_ENABLED=0 GOOS=linux GOARCH=amd64 \
go build -ldflags="-X main.commit=$(git rev-parse --short HEAD) -X main.version=local_$(git rev-parse --short HEAD) -X main.date=$(date -u +"%Y-%m-%dT%H:%M:%SZ")" \
-o build/llama-swap
FROM ghcr.io/ggml-org/llama.cpp:full-vulkan
# has to be after the FROM
ARG LS_VER=170
# Add user/group
ENV HOME=/app
WORKDIR /app
# Copy the binary from builder stage
COPY --from=go-builder /app/build/llama-swap .
COPY ./docker/config.example.yaml /config.yaml
ENTRYPOINT [ "/app/llama-swap", "-config", "/config.yaml" ]
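For reference, a hedged sketch of building and running this image with podman; the run flags mirror the Quadlet unit further below, and the host paths are illustrative:

```
podman build -t localhost/lamaswap:latest -f Build.Containerfile
# quick networked test run; the socket-activated setup below uses network=none instead
podman run --rm -p 8080:8080 --device /dev/dri \
  -v ~/models:/models:ro -v ~/config.yaml:/config.yaml:ro \
  localhost/lamaswap:latest
```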

README.md (modified)

@@ -1,3 +1,15 @@
# Llama Swap fork with socket activation
This is a fork of https://github.com/mostlygeek/llama-swap with the following changes:
- Support for systemd socket activation
- Containerfile for a fully containerized build with podman, targeting Linux and Vulkan
- Documentation and example service files showing how to set up a socket-activated
  podman container isolated with network=none
  (see the [Readme](docs/socket_activation/README.md))
# Original readme:
![llama-swap header image](header2.png)
![GitHub Downloads (all assets, all releases)](https://img.shields.io/github/downloads/mostlygeek/llama-swap/total)
![GitHub Actions Workflow Status](https://img.shields.io/github/actions/workflow/status/mostlygeek/llama-swap/go-ci.yml)
@@ -42,7 +54,8 @@ Built in Go for performance and simplicity, llama-swap has zero dependencies and
llama-swap includes a real time web interface for monitoring logs and controlling models:
<img width="1360" height="963" alt="image" src="https://github.com/user-attachments/assets/adef4a8e-de0b-49db-885a-8f6dedae6799" />
<img width="1164" height="745" alt="image" src="https://github.com/user-attachments/assets/bacf3f9d-819f-430b-9ed2-1bfaa8d54579" />
The Activity Page shows recent requests:

wol-proxy server (modified)

@@ -235,8 +235,9 @@ func (p *proxyServer) ServeHTTP(w http.ResponseWriter, r *http.Request) {
 		slog.Warn("failed to send magic WoL packet", "error", err)
 	}

-	// For root path requests, return loading page with status polling
-	if path == "/" {
+	// For root or UI path requests, return loading page with status polling
+	// the web page will do the polling and redirect when ready
+	if path == "/" || strings.HasPrefix(path, "/ui/") {
 		w.Header().Set("Content-Type", "text/html; charset=utf-8")
 		w.WriteHeader(http.StatusOK)
 		fmt.Fprint(w, loadingPageHTML)
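The effect of the change, sketched with curl (host name hypothetical): while the upstream machine is still waking, requests to / and to anything under /ui/ get the loading page immediately instead of a connection error:

```
curl -i http://wol-proxy:8080/ui/
# HTTP/1.1 200 OK
# Content-Type: text/html; charset=utf-8
# ...loadingPageHTML, which does the status polling and redirects when ready
```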

config-schema.json (new file, +278)

@@ -0,0 +1,278 @@
{
"$schema": "https://json-schema.org/draft-07/schema#",
"$id": "llama-swap-config-schema.json",
"title": "llama-swap configuration",
"description": "Configuration file for llama-swap",
"type": "object",
"required": [
"models"
],
"definitions": {
"macros": {
"type": "object",
"additionalProperties": {
"oneOf": [
{
"type": "string",
"minLength": 0,
"maxLength": 1024
},
{
"type": "number"
},
{
"type": "boolean"
}
]
},
"propertyNames": {
"type": "string",
"minLength": 1,
"maxLength": 64,
"pattern": "^[a-zA-Z0-9_-]+$",
"not": {
"enum": [
"PORT",
"MODEL_ID"
]
}
},
"default": {},
"description": "A dictionary of string substitutions. Macros are reusable snippets used in model cmd, cmdStop, proxy, checkEndpoint, filters.stripParams. Macro names must be <64 chars, match ^[a-zA-Z0-9_-]+$, and not be PORT or MODEL_ID. Values can be string, number, or boolean. Macros can reference other macros defined before them."
}
},
"properties": {
"healthCheckTimeout": {
"type": "integer",
"minimum": 15,
"default": 120,
"description": "Number of seconds to wait for a model to be ready to serve requests."
},
"logLevel": {
"type": "string",
"enum": [
"debug",
"info",
"warn",
"error"
],
"default": "info",
"description": "Sets the logging value. Valid values: debug, info, warn, error."
},
"logTimeFormat": {
"type": "string",
"enum": [
"",
"ansic",
"unixdate",
"rubydate",
"rfc822",
"rfc822z",
"rfc850",
"rfc1123",
"rfc1123z",
"rfc3339",
"rfc3339nano",
"kitchen",
"stamp",
"stampmilli",
"stampmicro",
"stampnano"
],
"default": "",
"description": "Enables and sets the logging timestamp format. Valid values: \"\", \"ansic\", \"unixdate\", \"rubydate\", \"rfc822\", \"rfc822z\", \"rfc850\", \"rfc1123\", \"rfc1123z\", \"rfc3339\", \"rfc3339nano\", \"kitchen\", \"stamp\", \"stampmilli\", \"stampmicro\", and \"stampnano\". For more info, read: https://pkg.go.dev/time#pkg-constants"
},
"metricsMaxInMemory": {
"type": "integer",
"default": 1000,
"description": "Maximum number of metrics to keep in memory. Controls how many metrics are stored before older ones are discarded."
},
"startPort": {
"type": "integer",
"default": 5800,
"description": "Starting port number for the automatic ${PORT} macro. The ${PORT} macro is incremented for every model that uses it."
},
"sendLoadingState": {
"type": "boolean",
"default": false,
"description": "Inject loading status updates into the reasoning field. When true, a stream of loading messages will be sent to the client."
},
"includeAliasesInList": {
"type": "boolean",
"default": false,
"description": "Present aliases within the /v1/models OpenAI API listing. when true, model aliases will be output to the API model listing duplicating all fields except for Id so chat UIs can use the alias equivalent to the original."
},
"macros": {
"$ref": "#/definitions/macros"
},
"models": {
"type": "object",
"description": "A dictionary of model configurations. Each key is a model's ID. Model settings have defaults if not defined. The model's ID is available as ${MODEL_ID}.",
"additionalProperties": {
"type": "object",
"required": [
"cmd"
],
"properties": {
"macros": {
"$ref": "#/definitions/macros"
},
"cmd": {
"type": "string",
"minLength": 1,
"description": "Command to run to start the inference server. Macros can be used. Comments allowed with |."
},
"cmdStop": {
"type": "string",
"default": "",
"description": "Command to run to stop the model gracefully. Uses ${PID} macro for upstream process id. If empty, default shutdown behavior is used."
},
"name": {
"type": "string",
"default": "",
"maxLength": 128,
"description": "Display name for the model. Used in v1/models API response."
},
"description": {
"type": "string",
"default": "",
"maxLength": 1024,
"description": "Description for the model. Used in v1/models API response."
},
"env": {
"type": "array",
"items": {
"type": "string",
"pattern": "^[A-Z_][A-Z0-9_]*=.*$"
},
"default": [],
"description": "Array of environment variables to inject into cmd's environment. Each value is a string in ENV_NAME=value format."
},
"proxy": {
"type": "string",
"default": "http://localhost:${PORT}",
"format": "uri",
"description": "URL where llama-swap routes API requests. If custom port is used in cmd, this must be set."
},
"aliases": {
"type": "array",
"items": {
"type": "string",
"minLength": 1
},
"default": [],
"description": "Alternative model names for this configuration. Must be unique globally."
},
"checkEndpoint": {
"type": "string",
"default": "/health",
"pattern": "^/.*$|^none$",
"description": "URL path to check if the server is ready. Use 'none' to skip health checking."
},
"ttl": {
"type": "integer",
"minimum": 0,
"default": 0,
"description": "Automatically unload the model after ttl seconds. 0 disables unloading. Must be >0 to enable."
},
"useModelName": {
"type": "string",
"default": "",
"description": "Override the model name sent to upstream server. Useful if upstream expects a different name."
},
"filters": {
"type": "object",
"properties": {
"stripParams": {
"type": "string",
"default": "",
"pattern": "^[a-zA-Z0-9_, ]*$",
"description": "Comma separated list of parameters to remove from the request. Used for server-side enforcement of sampling parameters."
}
},
"additionalProperties": false,
"default": {},
"description": "Dictionary of filter settings. Only stripParams is supported."
},
"metadata": {
"type": "object",
"additionalProperties": true,
"default": {},
"description": "Dictionary of arbitrary values included in /v1/models. Can contain complex types. Only passed through in /v1/models responses."
},
"concurrencyLimit": {
"type": "integer",
"minimum": 0,
"default": 0,
"description": "Overrides allowed number of active parallel requests to a model. 0 uses internal default of 10. >0 overrides default. Requests exceeding limit get HTTP 429."
},
"sendLoadingState": {
"type": "boolean",
"description": "Overrides the global sendLoadingState for this model. Ommitting this property will use the global setting."
},
"unlisted": {
"type": "boolean",
"default": false,
"description": "If true the model will not show up in /v1/models responses. It can still be used as normal in API requests."
}
}
}
},
"groups": {
"type": "object",
"additionalProperties": {
"type": "object",
"required": [
"members"
],
"properties": {
"swap": {
"type": "boolean",
"default": true,
"description": "Controls model swapping behaviour within the group. True: only one model runs at a time. False: all models can run together."
},
"exclusive": {
"type": "boolean",
"default": true,
"description": "Controls how the group affects other groups. True: causes all other groups to unload when this group runs a model. False: does not affect other groups."
},
"persistent": {
"type": "boolean",
"default": false,
"description": "Prevents other groups from unloading the models in this group. Does not affect individual model behaviour."
},
"members": {
"type": "array",
"items": {
"type": "string"
},
"description": "Array of model IDs that are members of this group. Model IDs must be defined in models."
}
}
},
"description": "A dictionary of group settings. Provides advanced controls over model swapping behaviour. Model IDs must be defined in models. A model can only be a member of one group. Behaviour controlled via swap, exclusive, persistent."
},
"hooks": {
"type": "object",
"properties": {
"on_startup": {
"type": "object",
"properties": {
"preload": {
"type": "array",
"items": {
"type": "string"
},
"default": [],
"description": "List of model IDs to load on startup. Model names must match keys in models. When preloading multiple models, define a group to prevent swapping."
}
},
"additionalProperties": false,
"description": "Actions to perform on startup. Only supported action is preload."
}
},
"additionalProperties": false,
"description": "A dictionary of event triggers and actions. Only supported hook is on_startup."
}
}
}
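As a usage sketch, here is a minimal config.yaml that satisfies the schema (models is the only required top-level key and cmd the only required model key; the model path is illustrative):

```
# yaml-language-server: $schema=https://raw.githubusercontent.com/mostlygeek/llama-swap/refs/heads/main/config-schema.json
models:
  "llama":
    cmd: llama-server --port ${PORT} -m /models/llama.gguf
```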

config.example.yaml (modified)

@@ -1,3 +1,6 @@
# add this modeline for validation in vscode
# yaml-language-server: $schema=https://raw.githubusercontent.com/mostlygeek/llama-swap/refs/heads/main/config-schema.json
#
# llama-swap YAML configuration example
# -------------------------------------
#
@@ -23,6 +26,14 @@ healthCheckTimeout: 500
# - Valid log levels: debug, info, warn, error
logLevel: info
# logTimeFormat: enables and sets the logging timestamp format
# - optional, default (disabled): ""
# - Valid values: "", "ansic", "unixdate", "rubydate", "rfc822", "rfc822z",
# "rfc850", "rfc1123", "rfc1123z", "rfc3339", "rfc3339nano", "kitchen",
# "stamp", "stampmilli", "stampmicro", and "stampnano".
# - For more info, read: https://pkg.go.dev/time#pkg-constants
logTimeFormat: ""
# metricsMaxInMemory: maximum number of metrics to keep in memory
# - optional, default: 1000
# - controls how many metrics are stored in memory before older ones are discarded
@@ -43,6 +54,12 @@ startPort: 10001
# - see #366 for more details
sendLoadingState: true
# includeAliasesInList: present aliases within the /v1/models OpenAI API listing
# - optional, default: false
# - when true, model aliases are included in the API model listing, duplicating
#   all fields except for id so chat UIs can use the alias equivalently to the original
includeAliasesInList: false
# macros: a dictionary of string substitutions
# - optional, default: empty dictionary
# - macros are reusable snippets
@@ -72,7 +89,6 @@ macros:
# - the model's ID is available in the ${MODEL_ID} macro, also available in macros defined above
# - below are examples of the all the settings a model can have
models:
# keys are the model names used in API requests
"llama":
# macros: a dictionary of string substitutions specific to this model
@@ -298,10 +314,10 @@ hooks:
 # - optional, default: empty dictionary
 # - the only supported action is preload
 on_startup:
-# preload: a list of model ids to load on startup
-# - optional, default: empty list
-# - model names must match keys in the models sections
-# - when preloading multiple models at once, define a group
-#   otherwise models will be loaded and swapped out
+  # preload: a list of model ids to load on startup
+  # - optional, default: empty list
+  # - model names must match keys in the models sections
+  # - when preloading multiple models at once, define a group
+  #   otherwise models will be loaded and swapped out
   preload:
-  - "llama"
+    - "llama"

docs/socket_activation/README.md (new file)

@@ -0,0 +1,41 @@
# Rootless podman container with systemd socket activation
## Idea
By having systemd pass in the listening socket, the container is only started on demand,
which minimizes resource use while idle. Since no other network access is required for
operation, we can configure the container with network=none and minimize the risk of the
AI escaping.
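The Go side of socket activation is small; below is a minimal, self-contained sketch of what the patched main.go does with github.com/coreos/go-systemd/v22/activation (the fallback port matches llama-swap's default):

```
package main

import (
	"fmt"
	"log"
	"net"
	"net/http"

	"github.com/coreos/go-systemd/v22/activation"
)

func main() {
	// Sockets inherited from systemd (via llama.socket) show up here;
	// outside of systemd the slice is simply empty.
	listeners, err := activation.Listeners()
	if err != nil {
		log.Fatalf("systemd activation error: %v", err)
	}

	var ln net.Listener
	if len(listeners) > 0 {
		ln = listeners[0]
		log.Printf("using systemd socket %s", ln.Addr())
	} else { // fall back to a regular listener for ad-hoc runs
		if ln, err = net.Listen("tcp", ":8080"); err != nil {
			log.Fatal(err)
		}
	}

	http.HandleFunc("/", func(w http.ResponseWriter, r *http.Request) {
		fmt.Fprintln(w, "hello from a (possibly) socket-activated server")
	})
	log.Fatal(http.Serve(ln, nil))
}
```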
## Set up
Optional: if you want to run this as a separate user
```
sudo useradd llama
sudo machinectl shell llama@
```
Check out this repository, navigate to its root directory, and build the llama.cpp/llama-swap
container with
```
podman build -t localhost/lamaswap:latest -f Build.Containerfile
```
Place llama.socket in `~/.config/systemd/user`, adjusting ports and interfaces if needed.
Place llama.container in `~/.config/containers/systemd`, adjusting the paths for models and config if desired.
Both files are in `docs/socket_activation`, next to this readme.
Put model files into the models directory (`~/models`).
Create a llama-swap `config.yaml` (by default in `~`) according to the docs.
Start the socket:
```
systemctl --user daemon-reload
systemctl --user enable --now llama.socket
```
If you want to run the service also when the user is not logged in, enable lingering:
```
sudo loginctl enable-linger <user>
```
Check that you can access the llama-swap control panel in a browser. For troubleshooting use, e.g., `journalctl -xe`.
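A few hedged smoke tests once the socket is enabled (the first request will be slow while the container starts; llama.service is the unit Quadlet generates from llama.container):

```
curl http://localhost:8080/v1/models   # model listing served by llama-swap
curl http://localhost:8080/api/version # build date / commit / version
systemctl --user status llama.service
```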

docs/socket_activation/llama.container (new file)

@@ -0,0 +1,20 @@
[Unit]
Description=Llama.cpp (full-vulkan) inference server
After=network-online.target
Wants=network-online.target
[Container]
Image=localhost/lamaswap:latest
#AutoRemove=yes
#PublishPort=8080:8080
Network=none
Volume=%h/models:/models:ro,Z
Volume=%h/config.yaml:/config.yaml:ro,Z
AddDevice=/dev/dri
Exec=
#[Service]
#Restart=always
[Install]
WantedBy=default.target

docs/socket_activation/llama.socket (new file)

@@ -0,0 +1,8 @@
[Unit]
Description=llama socket
[Socket]
ListenStream=0.0.0.0:8080
[Install]
WantedBy=default.target
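To try socket activation without installing the units, systemd's helper tool can hand the binary a listening socket in the same way (binary and config paths are illustrative):

```
systemd-socket-activate -l 8080 ./build/llama-swap -config ./config.yaml
```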

go.mod (modified, +1)

@@ -17,6 +17,7 @@ require (
github.com/bytedance/sonic/loader v0.1.1 // indirect
github.com/cloudwego/base64x v0.1.4 // indirect
github.com/cloudwego/iasm v0.2.0 // indirect
github.com/coreos/go-systemd/v22 v22.6.0 // indirect
github.com/davecgh/go-spew v1.1.1 // indirect
github.com/gabriel-vasile/mimetype v1.4.3 // indirect
github.com/gin-contrib/sse v0.1.0 // indirect

go.sum (modified, +2)

@@ -8,6 +8,8 @@ github.com/cloudwego/base64x v0.1.4 h1:jwCgWpFanWmN8xoIUHa2rtzmkd5J2plF/dnLS6Xd/
github.com/cloudwego/base64x v0.1.4/go.mod h1:0zlkT4Wn5C6NdauXdJRhSKRlJvmclQ1hhJgA0rcu/8w=
github.com/cloudwego/iasm v0.2.0 h1:1KNIy1I1H9hNNFEEH3DVnI4UujN+1zjpuk6gwHLTssg=
github.com/cloudwego/iasm v0.2.0/go.mod h1:8rXZaNYT2n95jn+zTI1sDr+IgcD2GVs0nlbbQPiEFhY=
github.com/coreos/go-systemd/v22 v22.6.0 h1:aGVa/v8B7hpb0TKl0MWoAavPDmHvobFe5R5zn0bCJWo=
github.com/coreos/go-systemd/v22 v22.6.0/go.mod h1:iG+pp635Fo7ZmV/j14KUcmEyWF+0X7Lua8rrTWzYgWU=
github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c=
github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=

main.go (modified)

@@ -5,15 +5,16 @@ import (
	"flag"
	"fmt"
	"log"
	"net"
	"net/http"
	"os"
	"os/signal"
	"path/filepath"
	"syscall"
	"time"

	"github.com/fsnotify/fsnotify"
	"github.com/gin-gonic/gin"
	"github.com/coreos/go-systemd/v22/activation"
	"github.com/mostlygeek/llama-swap/event"
	"github.com/mostlygeek/llama-swap/proxy"
	"github.com/mostlygeek/llama-swap/proxy/config"
@@ -64,15 +65,33 @@ func main() {
 		fmt.Println("Error: Both --tls-cert-file and --tls-key-file must be provided for TLS.")
 		os.Exit(1)
 	}

-	// Set default ports.
-	if *listenStr == "" {
-		defaultPort := ":8080"
-		if useTLS {
-			defaultPort = ":8443"
-		}
-		listenStr = &defaultPort
-	}
+	// Check for systemd socket activation
+	var listener net.Listener
+	files, err := activation.Listeners()
+	if err != nil {
+		log.Fatalf("Systemd activation error: %v", err)
+	}
+	if len(files) > 0 {
+		if len(files) > 1 {
+			log.Fatalf("Expected a single activated socket, got %d", len(files))
+		}
+		listener = files[0]
+		log.Printf("Using systemd socket %s", listener.Addr())
+	} else {
+		// Set default ports.
+		if *listenStr == "" {
+			defaultPort := ":8080"
+			if useTLS {
+				defaultPort = ":8443"
+			}
+			listenStr = &defaultPort
+		}
+		listener, err = net.Listen("tcp", *listenStr)
+		if err != nil {
+			log.Fatal("Failed to listen:", err)
+		}
+	}
+	defer listener.Close()

 	// Setup channels for server management
 	exitChan := make(chan struct{})
@@ -81,7 +100,7 @@ func main() {
 	// Create server with initial handler
 	srv := &http.Server{
-		Addr: *listenStr,
+		//Addr: *listenStr,
 	}

 	// Support for watching config and reloading when it changes
@@ -95,7 +114,9 @@ func main() {
 			fmt.Println("Configuration Changed")
 			currentPM.Shutdown()
-			srv.Handler = proxy.New(conf)
+			newPM := proxy.New(conf)
+			newPM.SetVersion(date, commit, version)
+			srv.Handler = newPM
 			fmt.Println("Configuration Reloaded")

 			// wait a few seconds and tell any UI to reload
@@ -110,7 +131,9 @@ func main() {
 			fmt.Printf("Error, unable to load configuration: %v\n", err)
 			os.Exit(1)
 		}
-		srv.Handler = proxy.New(conf)
+		newPM := proxy.New(conf)
+		newPM.SetVersion(date, commit, version)
+		srv.Handler = newPM
 	}
 }
@@ -190,10 +213,10 @@ func main() {
 	var err error
 	if useTLS {
 		fmt.Printf("llama-swap listening with TLS on https://%s\n", *listenStr)
-		err = srv.ListenAndServeTLS(*certFile, *keyFile)
+		err = srv.ServeTLS(listener, *certFile, *keyFile)
 	} else {
 		fmt.Printf("llama-swap listening on http://%s\n", *listenStr)
-		err = srv.ListenAndServe()
+		err = srv.Serve(listener)
 	}
 	if err != nil && err != http.ErrServerClosed {
 		log.Fatalf("Fatal server error: %v\n", err)

proxy/config: Config (modified)

@@ -113,6 +113,7 @@ type Config struct {
	HealthCheckTimeout int                    `yaml:"healthCheckTimeout"`
	LogRequests        bool                   `yaml:"logRequests"`
	LogLevel           string                 `yaml:"logLevel"`
	LogTimeFormat      string                 `yaml:"logTimeFormat"`
	MetricsMaxInMemory int                    `yaml:"metricsMaxInMemory"`
	Models             map[string]ModelConfig `yaml:"models"` /* key is model ID */
	Profiles           map[string][]string    `yaml:"profiles"`
@@ -132,6 +133,9 @@ type Config struct {
	// send loading state in reasoning
	SendLoadingState bool `yaml:"sendLoadingState"`

	// present aliases to /v1/models OpenAI API listing
	IncludeAliasesInList bool `yaml:"includeAliasesInList"`
}

func (c *Config) RealModelName(search string) (string, bool) {
@@ -172,6 +176,7 @@ func LoadConfigFromReader(r io.Reader) (Config, error) {
		HealthCheckTimeout: 120,
		StartPort:          5800,
		LogLevel:           "info",
		LogTimeFormat:      "",
		MetricsMaxInMemory: 1000,
	}

	err = yaml.Unmarshal(data, &config)
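A small usage sketch of how the two new options land in the parsed Config (import path as used in main.go above; YAML kept inline for brevity):

```
package main

import (
	"fmt"
	"log"
	"strings"

	"github.com/mostlygeek/llama-swap/proxy/config"
)

func main() {
	conf, err := config.LoadConfigFromReader(strings.NewReader(`
logTimeFormat: rfc3339
includeAliasesInList: true
models:
  "llama":
    cmd: llama-server --port ${PORT}
`))
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(conf.LogTimeFormat)        // "rfc3339" (mapped to time.RFC3339 later, in ProxyManager)
	fmt.Println(conf.IncludeAliasesInList) // true
}
```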

proxy/config: Config tests (modified)

@@ -58,6 +58,7 @@ models:
	assert.Equal(t, 120, config.HealthCheckTimeout)
	assert.Equal(t, 5800, config.StartPort)
	assert.Equal(t, "info", config.LogLevel)
	assert.Equal(t, "", config.LogTimeFormat)

	// Test default group exists
	defaultGroup, exists := config.Groups["(default)"]
@@ -163,8 +164,9 @@ groups:
 	modelLoadingState := false
 	expected := Config{
-		LogLevel:  "info",
-		StartPort: 5800,
+		LogLevel:      "info",
+		LogTimeFormat: "",
+		StartPort:     5800,
 		Macros: MacroList{
 			{"svr-path", "path/to/server"},
 		},

proxy/config: Config tests (modified)

@@ -55,6 +55,7 @@ models:
	assert.Equal(t, 120, config.HealthCheckTimeout)
	assert.Equal(t, 5800, config.StartPort)
	assert.Equal(t, "info", config.LogLevel)
	assert.Equal(t, "", config.LogTimeFormat)

	// Test default group exists
	defaultGroup, exists := config.Groups["(default)"]
@@ -155,8 +156,9 @@ groups:
 	modelLoadingState := false
 	expected := Config{
-		LogLevel:  "info",
-		StartPort: 5800,
+		LogLevel:      "info",
+		LogTimeFormat: "",
+		StartPort:     5800,
 		Macros: MacroList{
 			{"svr-path", "path/to/server"},
 		},

proxy: LogMonitor (modified)

@@ -7,6 +7,7 @@ import (
	"io"
	"os"
	"sync"
	"time"

	"github.com/mostlygeek/llama-swap/event"
)
@@ -32,6 +33,9 @@ type LogMonitor struct {
	// logging levels
	level  LogLevel
	prefix string

	// timestamps
	timeFormat string
}

func NewLogMonitor() *LogMonitor {
@@ -40,11 +44,12 @@ func NewLogMonitor() *LogMonitor {
 func NewLogMonitorWriter(stdout io.Writer) *LogMonitor {
 	return &LogMonitor{
-		eventbus: event.NewDispatcherConfig(1000),
-		buffer:   ring.New(10 * 1024), // keep 10KB of buffered logs
-		stdout:   stdout,
-		level:    LevelInfo,
-		prefix:   "",
+		eventbus:   event.NewDispatcherConfig(1000),
+		buffer:     ring.New(10 * 1024), // keep 10KB of buffered logs
+		stdout:     stdout,
+		level:      LevelInfo,
+		prefix:     "",
+		timeFormat: "",
 	}
 }
@@ -106,12 +111,22 @@ func (w *LogMonitor) SetLogLevel(level LogLevel) {
 	w.level = level
 }

+func (w *LogMonitor) SetLogTimeFormat(timeFormat string) {
+	w.mu.Lock()
+	defer w.mu.Unlock()
+	w.timeFormat = timeFormat
+}
+
 func (w *LogMonitor) formatMessage(level string, msg string) []byte {
 	prefix := ""
 	if w.prefix != "" {
 		prefix = fmt.Sprintf("[%s] ", w.prefix)
 	}
-	return []byte(fmt.Sprintf("%s[%s] %s\n", prefix, level, msg))
+
+	timestamp := ""
+	if w.timeFormat != "" {
+		timestamp = fmt.Sprintf("%s ", time.Now().Format(w.timeFormat))
+	}
+	return []byte(fmt.Sprintf("%s%s[%s] %s\n", timestamp, prefix, level, msg))
 }

 func (w *LogMonitor) log(level LogLevel, msg string) {
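Usage sketch (inside the proxy package; the log line shape in the comment is approximate, and the mapping from the config string to the Go constant happens in ProxyManager, further below):

```
// sketch, inside package proxy; "os" and "time" imported
lm := NewLogMonitorWriter(os.Stdout)
lm.SetLogTimeFormat(time.RFC3339) // "" (the default) keeps timestamps off
lm.Info("model loaded")           // e.g. "2025-11-16T10:21:59-08:00 [INFO] model loaded"
```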

proxy: LogMonitor tests (modified)

@@ -3,8 +3,10 @@ package proxy
import (
	"bytes"
	"io"
	"strings"
	"sync"
	"testing"
	"time"
)

func TestLogMonitor(t *testing.T) {
@@ -84,3 +86,30 @@ func TestWrite_ImmutableBuffer(t *testing.T) {
		t.Errorf("Expected history to be %q, got %q", expected, history)
	}
}

func TestWrite_LogTimeFormat(t *testing.T) {
	// Create a new LogMonitor instance
	lm := NewLogMonitorWriter(io.Discard)

	// Enable timestamps
	lm.timeFormat = time.RFC3339

	// Write the message to the LogMonitor
	lm.Info("Hello, World!")

	// Get the history from the LogMonitor
	history := lm.GetHistory()

	timestamp := ""
	fields := strings.Fields(string(history))
	if len(fields) > 0 {
		timestamp = fields[0]
	} else {
		t.Fatalf("Cannot extract string from history")
	}

	_, err := time.Parse(time.RFC3339, timestamp)
	if err != nil {
		t.Fatalf("Cannot find timestamp: %v", err)
	}
}

proxy: ProxyManager (modified)

@@ -45,6 +45,11 @@ type ProxyManager struct {
	// shutdown signaling
	shutdownCtx    context.Context
	shutdownCancel context.CancelFunc

	// version info
	buildDate string
	commit    string
	version   string
}

func New(config config.Config) *ProxyManager {
@@ -75,6 +80,30 @@ func New(config config.Config) *ProxyManager {
		upstreamLogger.SetLogLevel(LevelInfo)
	}

	// see: https://go.dev/src/time/format.go
	timeFormats := map[string]string{
		"ansic":       time.ANSIC,
		"unixdate":    time.UnixDate,
		"rubydate":    time.RubyDate,
		"rfc822":      time.RFC822,
		"rfc822z":     time.RFC822Z,
		"rfc850":      time.RFC850,
		"rfc1123":     time.RFC1123,
		"rfc1123z":    time.RFC1123Z,
		"rfc3339":     time.RFC3339,
		"rfc3339nano": time.RFC3339Nano,
		"kitchen":     time.Kitchen,
		"stamp":       time.Stamp,
		"stampmilli":  time.StampMilli,
		"stampmicro":  time.StampMicro,
		"stampnano":   time.StampNano,
	}
	if timeFormat, ok := timeFormats[strings.ToLower(strings.TrimSpace(config.LogTimeFormat))]; ok {
		proxyLogger.SetLogTimeFormat(timeFormat)
		upstreamLogger.SetLogTimeFormat(timeFormat)
	}

	shutdownCtx, shutdownCancel := context.WithCancel(context.Background())

	var maxMetrics int
@@ -98,6 +127,10 @@ func New(config config.Config) *ProxyManager {
		shutdownCtx:    shutdownCtx,
		shutdownCancel: shutdownCancel,

		buildDate: "unknown",
		commit:    "abcd1234",
		version:   "0",
	}

	// create the process groups
@@ -376,28 +409,40 @@ func (pm *ProxyManager) listModelsHandler(c *gin.Context) {
 			continue
 		}

-		record := gin.H{
-			"id":       id,
-			"object":   "model",
-			"created":  createdTime,
-			"owned_by": "llama-swap",
-		}
-
-		if name := strings.TrimSpace(modelConfig.Name); name != "" {
-			record["name"] = name
-		}
-		if desc := strings.TrimSpace(modelConfig.Description); desc != "" {
-			record["description"] = desc
-		}
-
-		// Add metadata if present
-		if len(modelConfig.Metadata) > 0 {
-			record["meta"] = gin.H{
-				"llamaswap": modelConfig.Metadata,
-			}
-		}
-		data = append(data, record)
+		newRecord := func(modelId string) gin.H {
+			record := gin.H{
+				"id":       modelId,
+				"object":   "model",
+				"created":  createdTime,
+				"owned_by": "llama-swap",
+			}
+			if name := strings.TrimSpace(modelConfig.Name); name != "" {
+				record["name"] = name
+			}
+			if desc := strings.TrimSpace(modelConfig.Description); desc != "" {
+				record["description"] = desc
+			}
+			// Add metadata if present
+			if len(modelConfig.Metadata) > 0 {
+				record["meta"] = gin.H{
+					"llamaswap": modelConfig.Metadata,
+				}
+			}
+			return record
+		}
+
+		data = append(data, newRecord(id))
+
+		// Include aliases
+		if pm.config.IncludeAliasesInList {
+			for _, alias := range modelConfig.Aliases {
+				if alias := strings.TrimSpace(alias); alias != "" {
+					data = append(data, newRecord(alias))
+				}
+			}
+		}
 	}

 	// Sort by the "id" key
@@ -734,3 +779,11 @@ func (pm *ProxyManager) findGroupByModelName(modelName string) *ProcessGroup {
	}
	return nil
}

func (pm *ProxyManager) SetVersion(buildDate string, commit string, version string) {
	pm.Lock()
	defer pm.Unlock()

	pm.buildDate = buildDate
	pm.commit = commit
	pm.version = version
}
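With includeAliasesInList: true, a model with aliases shows up once per name in the listing; a hedged check (model and alias names hypothetical):

```
curl -s http://localhost:8080/v1/models | jq '.data[].id'
# "llama"
# "llama-8b"
```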

proxy: API handlers (modified)

@@ -28,6 +28,7 @@ func addApiHandlers(pm *ProxyManager) {
	apiGroup.POST("/models/unload/*model", pm.apiUnloadSingleModelHandler)
	apiGroup.GET("/events", pm.apiSendEvents)
	apiGroup.GET("/metrics", pm.apiGetMetrics)
	apiGroup.GET("/version", pm.apiGetVersion)
	}
}
@@ -227,3 +228,11 @@ func (pm *ProxyManager) apiUnloadSingleModelHandler(c *gin.Context) {
		c.String(http.StatusOK, "OK")
	}
}

func (pm *ProxyManager) apiGetVersion(c *gin.Context) {
	c.JSON(http.StatusOK, map[string]string{
		"version":    pm.version,
		"commit":     pm.commit,
		"build_date": pm.buildDate,
	})
}
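Quick check of the new endpoint; the values come from SetVersion, which main.go wires up from the build-time ldflags (output illustrative):

```
curl -s http://localhost:8080/api/version
# {"build_date":"2025-11-20T00:18:58Z","commit":"b984f5ca08","version":"local_b984f5ca08"}
```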

proxy: ProxyManager tests (modified)

@@ -437,6 +437,70 @@ func TestProxyManager_ListModelsHandler_SortedByID(t *testing.T) {
}
}
func TestProxyManager_ListModelsHandler_IncludeAliasesInList(t *testing.T) {
	// Configure alias
	config := config.Config{
		HealthCheckTimeout:   15,
		IncludeAliasesInList: true,
		Models: map[string]config.ModelConfig{
			"model1": func() config.ModelConfig {
				mc := getTestSimpleResponderConfig("model1")
				mc.Name = "Model 1"
				mc.Aliases = []string{"alias1"}
				return mc
			}(),
		},
		LogLevel: "error",
	}
	proxy := New(config)

	// Request models list
	req := httptest.NewRequest("GET", "/v1/models", nil)
	w := CreateTestResponseRecorder()
	proxy.ServeHTTP(w, req)
	assert.Equal(t, http.StatusOK, w.Code)

	var response struct {
		Data []map[string]interface{} `json:"data"`
	}
	if err := json.Unmarshal(w.Body.Bytes(), &response); err != nil {
		t.Fatalf("Failed to parse JSON response: %v", err)
	}

	// We expect both base id and alias
	var model1Data, alias1Data map[string]any
	for _, model := range response.Data {
		if model["id"] == "model1" {
			model1Data = model
		} else if model["id"] == "alias1" {
			alias1Data = model
		}
	}

	// Verify model1 has name
	assert.NotNil(t, model1Data)
	_, exists := model1Data["name"]
	if !assert.True(t, exists, "model1 should have name key") {
		t.FailNow()
	}
	name1, ok := model1Data["name"].(string)
	assert.True(t, ok, "name1 should be a string")

	// Verify alias1 has name
	assert.NotNil(t, alias1Data)
	_, exists = alias1Data["name"]
	if !assert.True(t, exists, "alias1 should have name key") {
		t.FailNow()
	}
	name2, ok := alias1Data["name"].(string)
	assert.True(t, ok, "name2 should be a string")

	// Name keys should match
	assert.Equal(t, name1, name2)
}
func TestProxyManager_Shutdown(t *testing.T) {
// make broken model configurations
model1Config := getTestSimpleResponderConfigPort("model1", 9991)
@@ -1083,3 +1147,41 @@ func TestProxyManager_ProxiedStreamingEndpointReturnsNoBufferingHeader(t *testin
assert.Equal(t, "no", rec.Header().Get("X-Accel-Buffering"))
assert.Contains(t, rec.Header().Get("Content-Type"), "text/event-stream")
}
func TestProxyManager_ApiGetVersion(t *testing.T) {
	config := config.AddDefaultGroupToConfig(config.Config{
		HealthCheckTimeout: 15,
		Models: map[string]config.ModelConfig{
			"model1": getTestSimpleResponderConfig("model1"),
		},
		LogLevel: "error",
	})

	// Version test map
	versionTest := map[string]string{
		"build_date": "1970-01-01T00:00:00Z",
		"commit":     "cc915ddb6f04a42d9cd1f524e1d46ec6ed069fdc",
		"version":    "v001",
	}

	proxy := New(config)
	proxy.SetVersion(versionTest["build_date"], versionTest["commit"], versionTest["version"])
	defer proxy.StopProcesses(StopWaitForInflightRequest)

	req := httptest.NewRequest("GET", "/api/version", nil)
	w := CreateTestResponseRecorder()
	proxy.ServeHTTP(w, req)
	assert.Equal(t, http.StatusOK, w.Code)

	// Ensure json response
	assert.Equal(t, "application/json; charset=utf-8", w.Header().Get("Content-Type"))

	// Check for attributes
	response := map[string]string{}
	assert.NoError(t, json.Unmarshal(w.Body.Bytes(), &response))
	for key, value := range versionTest {
		assert.Equal(t, value, response[key], "%s value %s should match response %s", key, value, response[key])
	}
}

ui: ConnectionStatusIcon (modified)

@@ -2,7 +2,7 @@ import { useAPI } from "../contexts/APIProvider";
 import { useMemo } from "react";

 const ConnectionStatusIcon = () => {
-  const { connectionStatus } = useAPI();
+  const { connectionStatus, versionInfo } = useAPI();

   const eventStatusColor = useMemo(() => {
     switch (connectionStatus) {
@@ -17,7 +17,7 @@ const ConnectionStatusIcon = () => {
   }, [connectionStatus]);

   return (
-    <div className="flex items-center" title={`event stream: ${connectionStatus}`}>
+    <div className="flex items-center" title={`Event Stream: ${connectionStatus ?? 'unknown'}\nAPI Version: ${versionInfo?.version ?? 'unknown'}\nCommit Hash: ${versionInfo?.commit?.substring(0,7) ?? 'unknown'}\nBuild Date: ${versionInfo?.build_date ?? 'unknown'}`}>
       <span className={`inline-block w-3 h-3 rounded-full ${eventStatusColor} mr-2`}></span>
     </div>
   );

ui: APIProvider context (modified)

@@ -23,6 +23,7 @@ interface APIProviderType {
  upstreamLogs: string;
  metrics: Metrics[];
  connectionStatus: ConnectionState;
  versionInfo: VersionInfo;
}

interface Metrics {
@@ -41,11 +42,18 @@ interface LogData {
  source: "upstream" | "proxy";
  data: string;
}

interface APIEventEnvelope {
  type: "modelStatus" | "logData" | "metrics";
  data: string;
}

interface VersionInfo {
  build_date: string;
  commit: string;
  version: string;
}

const APIContext = createContext<APIProviderType | undefined>(undefined);

type APIProviderProps = {
  children: ReactNode;
@@ -59,6 +67,11 @@ export function APIProvider({ children, autoStartAPIEvents = true }: APIProviderProps) {
  const [upstreamLogs, setUpstreamLogs] = useState("");
  const [metrics, setMetrics] = useState<Metrics[]>([]);
  const [connectionStatus, setConnectionState] = useState<ConnectionState>("disconnected");
  const [versionInfo, setVersionInfo] = useState<VersionInfo>({
    build_date: "unknown",
    commit: "unknown",
    version: "unknown",
  });

  //const apiEventSource = useRef<EventSource | null>(null);
  const [models, setModels] = useState<Model[]>([]);
@@ -152,6 +165,26 @@ export function APIProvider({ children, autoStartAPIEvents = true }: APIProviderProps) {
    connect();
  }, []);

  useEffect(() => {
    // fetch version
    const fetchVersion = async () => {
      try {
        const response = await fetch("/api/version");
        if (!response.ok) {
          throw new Error(`HTTP error! status: ${response.status}`);
        }
        const data: VersionInfo = await response.json();
        setVersionInfo(data);
      } catch (error) {
        console.error(error);
      }
    };

    if (connectionStatus === 'connected') {
      fetchVersion();
    }
  }, [connectionStatus]);

  useEffect(() => {
    if (autoStartAPIEvents) {
      enableAPIEvents(true);
@@ -230,8 +263,9 @@ export function APIProvider({ children, autoStartAPIEvents = true }: APIProviderProps) {
       upstreamLogs,
       metrics,
       connectionStatus,
+      versionInfo,
     }),
-    [models, listModels, unloadAllModels, loadModel, enableAPIEvents, proxyLogs, upstreamLogs, metrics]
+    [models, listModels, unloadAllModels, unloadSingleModel, loadModel, enableAPIEvents, proxyLogs, upstreamLogs, metrics, connectionStatus, versionInfo]
   );

   return <APIContext.Provider value={value}>{children}</APIContext.Provider>;