From bbaf1729564a8e5f98b95b075cbbfe62d24bb909 Mon Sep 17 00:00:00 2001 From: Ian Sebastian Mathew Date: Wed, 30 Jul 2025 22:42:21 +0530 Subject: [PATCH] add trigger to rebuild homebrew formula (#210) --- .github/workflows/trigger-homebrew-update.yml | 21 +++++++++++++++++++ README.md | 14 +++++++++++++ 2 files changed, 35 insertions(+) create mode 100644 .github/workflows/trigger-homebrew-update.yml diff --git a/.github/workflows/trigger-homebrew-update.yml b/.github/workflows/trigger-homebrew-update.yml new file mode 100644 index 0000000..a1b319d --- /dev/null +++ b/.github/workflows/trigger-homebrew-update.yml @@ -0,0 +1,21 @@ +name: Trigger Homebrew Tap Update +on: + release: + types: [published] + +jobs: + trigger-tap-update: + runs-on: ubuntu-latest + steps: + - name: "Trigger tap repository update" + uses: peter-evans/repository-dispatch@v2 + with: + token: ${{ secrets.TAP_REPO_PAT }} + repository: mostlygeek/homebrew-llama-swap + event-type: new-release + client-payload: |- + { + "release": { + "tag_name": "${{ github.event.release.tag_name }}" + } + } \ No newline at end of file diff --git a/README.md b/README.md index 088fda8..b3b810e 100644 --- a/README.md +++ b/README.md @@ -122,6 +122,20 @@ $ docker run -it --rm --runtime nvidia -p 9292:8080 \ +## Homebrew Install (macOS/Linux) + +For macOS & Linux users, `llama-swap` can be installed via [Homebrew](https://brew.sh): + +```shell +# Set up tap and install formula +brew tap mostlygeek/llama-swap +brew install llama-swap +# Run llama-swap +llama-swap --config path/to/config.yaml --listen localhost:8080 +``` + +This will install the `llama-swap` binary and make it available in your path. See the [configuration documentation](https://github.com/mostlygeek/llama-swap/wiki/Configuration) + ## Bare metal Install ([download](https://github.com/mostlygeek/llama-swap/releases)) Pre-built binaries are available for Linux, Mac, Windows and FreeBSD. 
These are automatically published and are typically a few hours ahead of the Docker releases. The bare metal install works with any OpenAI-compatible server, not just llama-server.