wip: self host modes

This commit is contained in:
Daniel Salazar
2026-05-03 00:46:09 -07:00
parent fd0e32039a
commit 63732cbc8a
12 changed files with 801 additions and 84 deletions
+36
View File
@@ -1,4 +1,40 @@
.dockerignore
Dockerfile
docker-compose.yml
# Dev / build artifacts (recreated inside the build stage)
node_modules
dist
src/gui/dist
src/puter-js/dist
*.tsbuildinfo
# Local runtime data
volatile
config.json
config.dev.json
/puter
# OS / editor
.DS_Store
.vscode
.idea
# Git / CI
.git
.github
# Logs
*.log
npm-debug.log*
.npm
# Tests / coverage
coverage
.nyc_output
# Secrets
.env
.env.*
creds*
*.pem
+18
View File
@@ -0,0 +1,18 @@
# Copy this file to `.env`, fill in the secrets, and `docker compose -f
# docker-compose.full.yml up -d`. None of the defaults below are safe for
# anything beyond a local laptop test.
# ── Public-facing ports (nginx) ---------------------------------------
HTTP_PORT=80
# HTTPS_PORT=443 # uncomment after you enable TLS in nginx/nginx.conf
# ── MariaDB ------------------------------------------------------------
MARIADB_ROOT_PASSWORD=replace-with-strong-password
MARIADB_DATABASE=puter
MARIADB_USER=puter
MARIADB_PASSWORD=replace-with-strong-password
# ── S3 (RustFS) --------------------------------------------------------
S3_ACCESS_KEY=puter
S3_SECRET_KEY=replace-with-strong-secret
S3_BUCKET=puter-local
+67 -70
View File
@@ -1,91 +1,88 @@
# /!\ NOTICE /!\
# syntax=docker/dockerfile:1.7
#
# OSS Puter image — multi-arch (linux/amd64, linux/arm64).
#
# Build & push:
# docker buildx build --platform linux/amd64,linux/arm64 \
# -t ghcr.io/heyputer/puter:latest --push .
#
# Local single-arch build:
# docker build -t puter .
#
# Self-hosters inject configuration by mounting a config.json at
# /etc/puter/config.json. It is deep-merged over the bundled
# config.default.json, so partial overrides work. Absent file = defaults.
# Many of the developers DO NOT USE the Dockerfile or image.
# While we do test new changes to Docker configuration, it's
# possible that future changes to the repo might break it.
# When changing this file, please try to make it as resilient
# to such changes as possible; developers shouldn't need to
# worry about Docker unless the build/run process changes.
# ---- Build stage ----
FROM node:24-slim AS build
# Build stage
FROM node:24-alpine AS build
WORKDIR /opt/puter
# Install build dependencies
RUN apk add --no-cache git python3 make g++ \
&& ln -sf /usr/bin/python3 /usr/bin/python
# Build toolchain needed for native deps (bcrypt, sharp, better-sqlite3, …).
RUN apt-get update && \
apt-get install -y --no-install-recommends python3 make g++ git && \
rm -rf /var/lib/apt/lists/*
# Set up working directory
WORKDIR /app
ENV HUSKY=0
ENV npm_config_fund=false
ENV npm_config_audit=false
# Copy package.json and package-lock.json
# ---- Dependency layer ---------------------------------------------------
# Copy ONLY package manifests + lockfile first so the npm-install layer
# stays cached when only source files change.
COPY package.json package-lock.json ./
COPY src/backend/package.json src/backend/
COPY src/gui/package.json src/gui/
COPY src/puter-js/package.json src/puter-js/package-lock.json src/puter-js/
COPY src/worker/package.json src/worker/
COPY src/docs/package.json src/docs/
# Fail early if lockfile or manifest is missing
RUN test -f package.json && test -f package-lock.json
# extensionSetup.mjs runs as the postinstall hook during npm ci. (No-ops
# unless any packages/puter/extensions/* gain a package.json.)
COPY tools/extensionSetup.mjs tools/extensionSetup.mjs
# Copy the source files
RUN --mount=type=cache,target=/root/.npm \
npm ci
# ---- Source layer -------------------------------------------------------
COPY . .
# Install mocha
RUN npm i -g npm@latest
RUN npm install -g mocha
# Compile backend TS, then build GUI + puter-js webpack bundles in
# parallel. The GUI/puter-js bundles are how /dist/bundle.min.{js,css}
# and /sdk/puter.js fall back to local assets when the kernel-config
# CDN keys are unset.
RUN npm run build:ts
RUN set -e; \
(cd src/gui && node ./build.js) & gui_pid=$!; \
(cd src/puter-js && npm run build) & pjs_pid=$!; \
wait $gui_pid; \
wait $pjs_pid
# Install node modules
RUN npm cache clean --force && \
for i in 1 2 3; do \
npm ci && break || \
if [ $i -lt 3 ]; then \
sleep 15; \
else \
LOG_DIR="$(npm config get cache | tr -d '\"')/_logs"; \
echo "npm install failed; dumping logs from $LOG_DIR"; \
if [ -d "$LOG_DIR" ]; then \
ls -al "$LOG_DIR" || true; \
cat "$LOG_DIR"/* || true; \
else \
echo "Log directory not found (npm cache: $(npm config get cache))"; \
fi; \
exit 1; \
fi; \
done
# ---- Runtime stage (slim — no build tools) ----
FROM node:24-slim
# Run the build command if necessary
RUN cd src/gui && npm run build && cd -
WORKDIR /opt/puter
# Production stage
FROM node:24-alpine
# git: runtime version probe. wget: HEALTHCHECK.
RUN apt-get update && \
apt-get install -y --no-install-recommends git wget && \
rm -rf /var/lib/apt/lists/*
# Set labels
LABEL repo="https://github.com/HeyPuter/puter"
LABEL license="AGPL-3.0,https://github.com/HeyPuter/puter/blob/master/LICENSE.txt"
LABEL version="1.2.46-beta-1"
COPY --from=build --chown=node:node /opt/puter .
# Install git (required by Puter to check version)
RUN apk add --no-cache git
RUN mkdir -p /etc/puter /var/puter && \
chown -R node:node /etc/puter /var/puter
# Set up working directory
RUN mkdir -p /opt/puter/app
WORKDIR /opt/puter/app
# Copy built artifacts and necessary files from the build stage
COPY --from=build /app/src/gui/dist ./dist
COPY --from=build /app/node_modules ./node_modules
COPY . .
# Set permissions
RUN chown -R node:node /opt/puter/app
USER node
# Self-hosters mount their override at this exact path. The v2 loader
# deep-merges it over config.default.json (see backend/index.ts).
ENV PUTER_CONFIG_PATH=/etc/puter/config.json
ENV NODE_OPTIONS=--enable-source-maps
EXPOSE 4100
HEALTHCHECK --interval=30s --timeout=3s \
USER node
HEALTHCHECK --interval=30s --timeout=3s --start-period=30s --retries=3 \
CMD wget --no-verbose --tries=1 --spider http://puter.localhost:4100/test || exit 1
ENV NO_VAR_RUNTUME=1
ENV NODE_OPTIONS=--enable-source-maps
# Attempt to fix `lru-cache@11.0.2` missing after build stage
# by doing a redundant `npm install` at this stage
RUN npm install
CMD ["npm", "start"]
CMD ["node", "-r", "./dist/src/backend/telemetry.js", "./dist/src/backend/index.js"]
+187
View File
@@ -0,0 +1,187 @@
---
# Self-hosted Puter — full stack.
#
# Brings up Puter + every external service it needs:
# - nginx : reverse proxy (mirrors prod ALB; handles TLS + Host fan-out)
# - valkey : redis-compatible cache / rate-limiter backend
# - mariadb : SQL database (Puter applies its schema on first boot)
# - dynamo : DynamoDB-local (KV store; Puter creates the table itself)
# - s3 : RustFS — S3-compatible object storage
# - s3-init : one-shot init container that creates the bucket
# - puter : the application
#
# Quick start:
# 1. Copy .env.example to .env (or set the variables in your shell).
# 2. Drop a config.json into ./puter/config/ — see selfhosting.md
# for the example that pairs with this compose.
# 3. docker compose -f docker-compose.full.yml up -d
#
# Production:
# - Always replace the default passwords / S3 keys / Puter secrets.
# - Front Puter with TLS-terminating reverse proxy (Caddy / nginx).
# - Move state-bearing volumes to a backed-up location.
services:
valkey:
image: valkey/valkey:8-alpine
container_name: puter-valkey
restart: unless-stopped
command:
- "valkey-server"
- "--save"
- "60"
- "1"
- "--appendonly"
- "yes"
volumes:
- ./puter/data/valkey:/data
healthcheck:
test: ["CMD", "valkey-cli", "ping"]
interval: 5s
timeout: 3s
retries: 10
mariadb:
image: mariadb:11
container_name: puter-mariadb
restart: unless-stopped
environment:
MARIADB_ROOT_PASSWORD: ${MARIADB_ROOT_PASSWORD:-root-change-me}
MARIADB_DATABASE: ${MARIADB_DATABASE:-puter}
MARIADB_USER: ${MARIADB_USER:-puter}
MARIADB_PASSWORD: ${MARIADB_PASSWORD:-puter-change-me}
volumes:
- ./puter/data/mariadb:/var/lib/mysql
healthcheck:
# `healthcheck.sh` ships with the mariadb image; --connect verifies
# the server is accepting auth, not just listening on the socket.
test: ["CMD", "healthcheck.sh", "--connect", "--innodb_initialized"]
interval: 5s
timeout: 5s
retries: 20
start_period: 30s
dynamo:
# Puter creates the `store-kv-v1` table itself on startup
# (config.dynamo.bootstrapTables = true does the work).
image: amazon/dynamodb-local:latest
container_name: puter-dynamo
restart: unless-stopped
user: "1000:1000"
working_dir: /home/dynamodblocal
command:
- "-jar"
- "DynamoDBLocal.jar"
- "-sharedDb"
- "-dbPath"
- "/home/dynamodblocal/data"
volumes:
- ./puter/data/dynamo:/home/dynamodblocal/data
s3:
# RustFS — S3-compatible object storage. Drop-in alternative:
# MinIO (image: minio/minio, command: ["server", "/data", "--console-address", ":9001"]).
image: rustfs/rustfs:latest
container_name: puter-s3
restart: unless-stopped
environment:
RUSTFS_ACCESS_KEY: ${S3_ACCESS_KEY:-puter}
RUSTFS_SECRET_KEY: ${S3_SECRET_KEY:-puter-secret-change-me}
volumes:
- ./puter/data/s3:/data
healthcheck:
# RustFS exposes /health on the S3 port. Use wget (curl is not in
# the slim image).
test:
[
"CMD-SHELL",
"wget -qO- --tries=1 --timeout=2 http://localhost:9000/health || exit 1",
]
interval: 5s
timeout: 3s
retries: 20
start_period: 5s
s3-init:
# One-shot container that creates the `puter-local` bucket on first
# boot. Exits 0 once the bucket exists; stays exited 0 thereafter.
image: amazon/aws-cli:latest
container_name: puter-s3-init
depends_on:
s3:
condition: service_healthy
environment:
AWS_ACCESS_KEY_ID: ${S3_ACCESS_KEY:-puter}
AWS_SECRET_ACCESS_KEY: ${S3_SECRET_KEY:-puter-secret-change-me}
AWS_DEFAULT_REGION: us-east-1
entrypoint:
- /bin/sh
- -c
- |
set -e
endpoint=http://s3:9000
bucket=${S3_BUCKET:-puter-local}
if aws --endpoint-url "$$endpoint" s3api head-bucket --bucket "$$bucket" 2>/dev/null; then
echo "bucket $$bucket already exists"
else
echo "creating bucket $$bucket"
aws --endpoint-url "$$endpoint" s3 mb "s3://$$bucket"
fi
restart: "no"
puter:
image: ghcr.io/heyputer/puter:latest
pull_policy: always
container_name: puter
restart: unless-stopped
depends_on:
valkey:
condition: service_healthy
mariadb:
condition: service_healthy
dynamo:
condition: service_started
s3-init:
condition: service_completed_successfully
# Internal-only: nginx reaches it on the compose network. Uncomment
# to also expose port 4100 directly on the host (useful for debugging).
# ports:
# - "4100:4100"
expose:
- "4100"
environment:
PUID: 1000
PGID: 1000
volumes:
# Drop your config.json here — see selfhosting.md.
- ./puter/config:/etc/puter
# Persistent runtime data (anything your config points at /var/puter).
- ./puter/data/puter:/var/puter
healthcheck:
test: wget --no-verbose --tries=1 --spider http://puter.localhost:4100/test || exit 1
interval: 30s
timeout: 3s
retries: 3
start_period: 30s
nginx:
image: nginx:1.27-alpine
container_name: puter-nginx
restart: unless-stopped
depends_on:
puter:
condition: service_started
ports:
- "${HTTP_PORT:-80}:80"
# Uncomment when you enable TLS in nginx/nginx.conf:
# - "${HTTPS_PORT:-443}:443"
volumes:
- ./nginx/nginx.conf:/etc/nginx/nginx.conf:ro
# TLS certs (fullchain.pem + privkey.pem). Read-only inside.
- ./puter/tls:/etc/nginx/tls:ro
healthcheck:
test: ["CMD-SHELL", "wget -qO- --tries=1 --timeout=2 http://localhost/ || exit 1"]
interval: 10s
timeout: 3s
retries: 5
start_period: 5s
+12 -3
View File
@@ -1,21 +1,30 @@
---
version: "3.8"
services:
puter:
container_name: puter
image: ghcr.io/heyputer/puter:latest
pull_policy: always
# build: ./
# Uncomment to build from this directory instead of pulling the published image:
# build:
# context: .
# # buildx-only: cross-compile to both archs in a single push
# # platforms:
# # - linux/amd64
# # - linux/arm64
restart: unless-stopped
ports:
- '4100:4100'
environment:
# TZ: Europe/Paris
# CONFIG_PATH: /etc/puter
PUID: 1000
PGID: 1000
volumes:
# Drop your config.json into ./puter/config/. It is deep-merged over
# config.default.json — only override what you care to change.
# Image expects /etc/puter/config.json (see PUTER_CONFIG_PATH in Dockerfile).
- ./puter/config:/etc/puter
# Persistent runtime data (sqlite db, uploads, etc. — depends on your
# config). Maps to volatile/ inside the container by default.
- ./puter/data:/var/puter
healthcheck:
test: wget --no-verbose --tries=1 --spider http://puter.localhost:4100/test || exit 1
+85
View File
@@ -0,0 +1,85 @@
# Reverse proxy in front of Puter — mirrors what the prod ALB does:
# accepts every Host header, forwards to the Puter container, and lets
# the Puter app handle subdomain-based routing internally (api.*,
# site.*, app.*, etc).
#
# To enable TLS:
# 1. Drop your fullchain.pem + privkey.pem into ./puter/tls/.
# 2. Uncomment the 443 server{} block below.
# 3. Update server_name to your domain (and wildcard subdomains).
worker_processes auto;
events {
worker_connections 4096;
}
http {
# Required for Puter's WebSocket / socket.io upgrades.
map $http_upgrade $connection_upgrade {
default upgrade;
'' close;
}
# Rough size cap that mirrors prod ALB defaults; tune for your
# uploads. Puter chunks large uploads, so 1 GiB per request is plenty.
client_max_body_size 1024m;
proxy_read_timeout 600s;
proxy_send_timeout 600s;
proxy_buffering off;
server_tokens off;
upstream puter_backend {
server puter:4100;
keepalive 32;
}
# ── HTTP (port 80) — catches all hostnames ─────────────────────
server {
listen 80 default_server;
listen [::]:80 default_server;
server_name _;
# Note: when you enable TLS, replace this block with a redirect:
# return 301 https://$host$request_uri;
location / {
proxy_pass http://puter_backend;
proxy_http_version 1.1;
proxy_set_header Host $host;
proxy_set_header X-Real-IP $remote_addr;
proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
proxy_set_header X-Forwarded-Proto $scheme;
proxy_set_header X-Forwarded-Host $host;
proxy_set_header X-Forwarded-Port $server_port;
proxy_set_header Upgrade $http_upgrade;
proxy_set_header Connection $connection_upgrade;
}
}
# ── HTTPS (port 443) — uncomment after dropping certs in ./puter/tls/ ─
# server {
# listen 443 ssl default_server;
# listen [::]:443 ssl default_server;
# http2 on;
# server_name _;
#
# ssl_certificate /etc/nginx/tls/fullchain.pem;
# ssl_certificate_key /etc/nginx/tls/privkey.pem;
# ssl_protocols TLSv1.2 TLSv1.3;
# ssl_ciphers HIGH:!aNULL:!MD5;
# ssl_session_cache shared:SSL:10m;
# ssl_session_timeout 10m;
#
# location / {
# proxy_pass http://puter_backend;
# proxy_http_version 1.1;
# proxy_set_header Host $host;
# proxy_set_header X-Real-IP $remote_addr;
# proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
# proxy_set_header X-Forwarded-Proto $scheme;
# proxy_set_header X-Forwarded-Host $host;
# proxy_set_header X-Forwarded-Port $server_port;
# proxy_set_header Upgrade $http_upgrade;
# proxy_set_header Connection $connection_upgrade;
# }
# }
}
+1 -1
View File
@@ -54,7 +54,7 @@
"check-translations": "node tools/check-translations.js",
"prepare": "husky",
"build:ts": "tsc -p tsconfig.json && node ./tools/write-dist-package-json.mjs",
"postinstall": "./tools/extensionSetup.sh"
"postinstall": "node ./tools/extensionSetup.mjs"
},
"workspaces": [
"src/*",
+324
View File
@@ -0,0 +1,324 @@
# Self-hosting Puter
There are three supported ways to run Puter, in increasing order of effort and capability:
| Mode | Best for | External services |
| -------------------------------------------- | ------------------------------------------- | ---------------------------------------------------------------- |
| [**1. Dev (npm)**](#1-dev-mode-npm-start) | Trying it out on your laptop / LAN | None — everything runs in-process |
| [**2. Standalone Docker**](#2-standalone-docker) | Production single-host, BYO database / S3 | None bundled — point at whatever you already run |
| [**3. Full self-hosted stack**](#3-full-self-hosted-stack-docker-compose) | Production with a self-managed stack | Bundled: MariaDB, Valkey, DynamoDB-local, RustFS S3, nginx |
Pick one, follow that section, ignore the rest. There's also a [troubleshooting](#troubleshooting) section at the bottom.
---
## 1. Dev mode (npm start)
For trying Puter on your laptop or sharing it on your local network. **Not safe to expose to the internet** — uses dev secrets and an in-process key store.
**Requirements:** Node.js 24+, a C toolchain (Xcode CLT on macOS, `build-essential` + `python3` on Debian/Ubuntu) for native deps.
```bash
# from packages/puter/
npm install
npm run build # one-time — compiles backend, GUI, and puter.js
npm start # daily use — re-builds backend only, then starts
```
Open <http://puter.localhost:4100> in your browser.
That's it. With no `config.json` present, defaults give you:
- SQLite at `volatile/runtime/puter-database.sqlite` (auto-created)
- In-process S3 (`fauxqs`) with the `puter-local` bucket auto-created
- In-process DynamoDB (`dynalite`) with its table auto-created
- In-process Redis (`ioredis-mock`)
All state goes into `./volatile/`. Delete it to reset.
To override anything (port, domain, etc.), drop a `config.json` next to `package.json`:
```json
{ "port": 5101, "domain": "myhost.local" }
```
It deep-merges over `config.default.json`. Restart with `npm start`.
---
## 2. Standalone Docker
Single Puter container; you bring your own database, S3, etc. (or run with the in-process defaults for a quick spin).
**Requirements:** Docker.
The image is multi-arch (`linux/amd64`, `linux/arm64`).
### Quick start
```bash
mkdir -p puter/config puter/data
docker run -d \
--name puter \
--restart unless-stopped \
-p 4100:4100 \
-v $(pwd)/puter/config:/etc/puter \
-v $(pwd)/puter/data:/var/puter \
ghcr.io/heyputer/puter:latest
```
Open <http://puter.localhost:4100>. With no config mounted, the in-process defaults kick in (same as dev mode), and state lands in `puter/data/`.
### Adding a config
The container reads **`/etc/puter/config.json`** and deep-merges it on top of the bundled defaults. You only put the keys you want to change.
1. Create the file:
```bash
touch ./puter/config/config.json
```
2. Add overrides:
```json
{
"domain": "puter.example.com",
"protocol": "https",
"pub_port": 443,
"jwt_secret": "REPLACE-WITH-openssl-rand-hex-64",
"url_signature_secret": "REPLACE-WITH-A-DIFFERENT-openssl-rand-hex-64"
}
```
3. Restart: `docker restart puter`.
Confirm it took effect — logs should show:
```
[config] override from /etc/puter/config.json
```
### Wiring to external services
Same `config.json`, just add the relevant blocks. Mix and match.
**MySQL / MariaDB** (with idempotent schema bootstrap):
```json
{
"database": {
"engine": "mysql",
"host": "db.internal", "port": 3306,
"user": "puter", "password": "...", "database": "puter",
"migrationPaths": ["/opt/puter/src/backend/clients/database/migrations"]
}
}
```
**Real S3 / S3-compatible:**
```json
{
"s3": {
"s3Config": {
"endpoint": "https://s3.example.com",
"accessKeyId": "...", "secretAccessKey": "...",
"region": "us-east-1"
}
},
"s3_bucket": "my-puter-bucket",
"s3_region": "us-east-1"
}
```
**Real DynamoDB** (existing tables; provision externally):
```json
{
"dynamo": {
"aws": { "accessKeyId": "...", "secretAccessKey": "...", "region": "us-east-1" }
}
}
```
**Real Redis cluster:**
```json
{ "redis": { "startupNodes": [{ "host": "redis-0", "port": 6379 }] } }
```
**Always replace secrets.** The two below are baked into the public image and known to anyone — change them for any non-toy install:
```json
{ "jwt_secret": "...", "url_signature_secret": "..." }
```
Generate with `openssl rand -hex 64`.
### Persistent data
Anything you point at `/var/puter/...` in your config (e.g. SQLite path, fauxqs data dirs) lives on the host via the `./puter/data` mount. If you're using external services for everything, the data volume is optional.
### Updating
```bash
docker pull ghcr.io/heyputer/puter:latest
docker rm -f puter && <re-run the docker run command above>
```
Your `config.json` and persistent data are untouched.
### Building the image yourself
```bash
docker build -t puter .
# Multi-arch (requires buildx, on by default in modern Docker):
docker buildx build \
--platform linux/amd64,linux/arm64 \
-t your-registry/puter:latest \
--push .
```
---
## 3. Full self-hosted stack (docker compose)
Brings up Puter **plus every external service it needs**, configured to talk to each other out of the box. Closest thing to a production deployment you can self-manage.
**Requirements:** Docker with the compose plugin.
| Service | Image | Role |
| ------------- | ------------------------------ | --------------------------------------------------------- |
| `nginx` | `nginx:1.27-alpine` | Reverse proxy (mirrors prod ALB; TLS termination point) |
| `puter` | `ghcr.io/heyputer/puter` | The app |
| `mariadb` | `mariadb:11` | SQL database — schema applied automatically on first boot |
| `valkey` | `valkey/valkey:8-alpine` | Redis-compatible cache + rate-limiter |
| `dynamo` | `amazon/dynamodb-local` | KV store — table auto-created on first boot |
| `s3` | `rustfs/rustfs` | S3-compatible object storage (MinIO drop-in noted in file)|
| `s3-init` | `amazon/aws-cli` | One-shot — creates the bucket on first boot, then exits |
State lives under `./puter/data/<service>/`.
### Setup
1. **Create your `.env`** (secrets for the bundled services):
```bash
cp .env.example .env
```
Open `.env` and replace every `replace-with-...` value. Use `openssl rand -hex 32` for each.
2. **Create your `config.json`** at `./puter/config/config.json` — this wires Puter to the bundled services. Copy this verbatim, then replace the `REPLACE-...` markers (and make sure the password / secret values match your `.env`):
```json
{
"domain": "puter.localhost",
"protocol": "http",
"pub_port": 80,
"jwt_secret": "REPLACE-WITH-openssl-rand-hex-64",
"url_signature_secret": "REPLACE-WITH-A-DIFFERENT-openssl-rand-hex-64",
"database": {
"engine": "mysql",
"host": "mariadb", "port": 3306,
"user": "puter",
"password": "MUST-MATCH-MARIADB_PASSWORD-IN-DOTENV",
"database": "puter",
"migrationPaths": ["/opt/puter/src/backend/clients/database/migrations"]
},
"redis": { "startupNodes": [{ "host": "valkey", "port": 6379 }] },
"dynamo": {
"endpoint": "http://dynamo:8000",
"bootstrapTables": true,
"aws": { "accessKeyId": "fake", "secretAccessKey": "fake", "region": "us-east-1" }
},
"s3": {
"s3Config": {
"endpoint": "http://s3:9000",
"accessKeyId": "puter",
"secretAccessKey": "MUST-MATCH-S3_SECRET_KEY-IN-DOTENV",
"region": "us-east-1"
}
},
"s3_bucket": "puter-local",
"s3_region": "us-east-1"
}
```
Why these matter:
- `database.migrationPaths` — Puter applies the bundled MySQL schema (idempotent) on boot.
- `dynamo.bootstrapTables: true` — Puter creates its KV table on boot. **Only set against a local emulator**, never real AWS.
- The `dynamo.aws` keys are dummies; DynamoDB-local doesn't validate them but the AWS SDK requires *something*.
3. **Start it:**
```bash
docker compose -f docker-compose.full.yml up -d
```
First boot takes ~30s while MariaDB initialises and migrations apply. Tail logs:
```bash
docker compose -f docker-compose.full.yml logs -f puter
```
Healthy startup logs:
```
[config] override from /etc/puter/config.json
[mysql] running migrations from /opt/puter/src/backend/clients/database/migrations: 1 file(s)
[mysql] applied mysql_mig_1.sql (...)
```
4. **Open** <http://puter.localhost> (port 80, behind nginx).
### TLS
The default nginx config listens on port 80. To enable HTTPS:
1. Drop `fullchain.pem` and `privkey.pem` into `./puter/tls/` (use `certbot --standalone` against your domain or copy from a wildcard cert).
2. In [nginx/nginx.conf](nginx/nginx.conf), uncomment the 443 server block. Optionally replace the body of the port-80 server with `return 301 https://$host$request_uri;`.
3. In [docker-compose.full.yml](docker-compose.full.yml), uncomment the `443:443` port mapping under `nginx`.
4. In `config.json`, set:
```json
{ "protocol": "https", "pub_port": 443 }
```
5. Restart:
```bash
docker compose -f docker-compose.full.yml restart nginx puter
```
For wildcard subdomain support (Puter uses `api.<domain>`, `site.<domain>`, `app.<domain>`), make sure your DNS and cert cover `*.<your-domain>`. nginx's `server_name _` already accepts every Host header.
### Updating
```bash
docker compose -f docker-compose.full.yml pull
docker compose -f docker-compose.full.yml up -d
```
Migrations re-apply idempotently. Volumes are preserved.
### Tearing down
```bash
docker compose -f docker-compose.full.yml down # stop containers, keep data
rm -rf ./puter/data # nuke ALL state (irreversible!)
```
---
## Troubleshooting
**`docker logs puter` shows the container restarting.**
Most often a syntax error in `config.json` or a port already in use. Validate the JSON: `jq . ./puter/config/config.json`.
**The config file isn't picked up.**
Check the path resolves to `/etc/puter/config.json` *inside* the container:
```bash
docker exec puter cat /etc/puter/config.json
```
If that prints nothing, the volume mount is wrong.
**Healthcheck failing but the site loads.**
The healthcheck hits `http://puter.localhost:4100/test` from inside the container. If you changed `domain` or `port` in your config, the healthcheck still uses the defaults and may report unhealthy — the site itself is fine.
**Architecture mismatch on Apple Silicon / ARM hosts.**
Use the published `:latest` tag — it's already multi-arch. If you built locally with `docker build` on an Intel Mac, the resulting image will be `linux/amd64` only.
**`npm start` says missing `dist/`.**
You skipped `npm run build`. The `prestart` hook only rebuilds the backend; the GUI + `puter.js` bundles need the full build once.
**`docker compose -f docker-compose.full.yml up` hangs at "waiting for healthy".**
Check which dependency is unhealthy: `docker compose -f docker-compose.full.yml ps`. MariaDB takes ~20–30s on first boot to initialise; everything else should be ready in under 5s. If something stays unhealthy, `logs <service>` will tell you.
+5 -2
View File
@@ -203,9 +203,12 @@ export class SystemKVStore extends PuterStore {
override async onServerStart(): Promise<void> {
// For local/dynalite runs we need to create the table up front.
// For real AWS we assume the table already exists.
// Real AWS deployments provision tables externally (Terraform), so
// we skip — unless the operator explicitly opts in via
// `dynamo.bootstrapTables` (e.g. self-hosting against
// dynamodb-local in docker-compose).
const ddbConfig = this.config.dynamo ?? {};
if (ddbConfig.aws) return;
if (ddbConfig.aws && !ddbConfig.bootstrapTables) return;
this.initialized = this.clients.dynamo.createTableIfNotExists(
{ ...PUTER_KV_STORE_TABLE_DEFINITION, TableName: this.tableName },
+7
View File
@@ -29,6 +29,13 @@ export interface IDynamoConfig {
aws?: IAWSCredentials;
endpoint?: string;
path?: string;
/**
* Create required tables on startup if they don't exist. Off by
* default because real-AWS deployments provision tables externally
* (Terraform / IaC). Set to `true` when pointing at a local
* DynamoDB emulator so self-hosters don't have to bootstrap by hand.
*/
bootstrapTables?: boolean;
}
export interface IRedisConfig {
+59
View File
@@ -0,0 +1,59 @@
#!/usr/bin/env node
// Install dependencies for every subfolder under ./extensions/.
// Runs installs in parallel; uses `npm ci` when a lockfile is present,
// otherwise falls back to `npm install`. Cross-platform replacement for
// the previous bash version.
import { existsSync, readdirSync } from 'node:fs';
import { spawn } from 'node:child_process';
import { join } from 'node:path';

const EXT_DIR = './extensions';

// Nothing to do when the extensions directory is absent (fresh clone).
if (!existsSync(EXT_DIR)) {
    process.exit(0);
}

// `withFileTypes` classifies entries from the directory listing itself,
// avoiding a per-entry statSync() that would throw on dangling symlinks
// and abort every install.
const dirs = readdirSync(EXT_DIR, { withFileTypes: true })
    .filter((entry) => entry.isDirectory())
    .map((entry) => join(EXT_DIR, entry.name))
    .filter((p) => existsSync(join(p, 'package.json')));

if (dirs.length === 0) {
    process.exit(0);
}

// On Windows, npm is a .cmd shim. Since Node 18.20 / 20.12 / 21.7
// (CVE-2024-27980 hardening), spawning .bat/.cmd files without
// `shell: true` throws EINVAL — so route through the shell there.
const isWindows = process.platform === 'win32';

/**
 * Install dependencies for a single extension directory.
 * @param {string} dir - path to the extension folder
 * @returns {Promise<void>} resolves on exit code 0, rejects otherwise
 */
function install(dir) {
    return new Promise((resolve, reject) => {
        // Prefer reproducible `npm ci` when a lockfile exists.
        const args = existsSync(join(dir, 'package-lock.json')) ? ['ci'] : ['install'];
        console.log(`[${dir}] starting npm ${args.join(' ')}`);
        const child = spawn('npm', args, { cwd: dir, shell: isWindows });
        // Buffer combined stdout/stderr so parallel installs don't
        // interleave each other's output; flush once per extension.
        let out = '';
        child.stdout.on('data', (d) => (out += d));
        child.stderr.on('data', (d) => (out += d));
        child.on('error', reject);
        child.on('close', (code) => {
            if (out) process.stdout.write(out);
            if (code === 0) {
                console.log(`[${dir}] done`);
                resolve();
            } else {
                reject(new Error(`[${dir}] npm ${args.join(' ')} exited with code ${code}`));
            }
        });
    });
}

// Run all installs in parallel and collect every failure before exiting,
// so one broken extension doesn't hide the others' errors.
const results = await Promise.allSettled(dirs.map(install));
const failures = results
    .map((r, i) => ({ r, dir: dirs[i] }))
    .filter(({ r }) => r.status === 'rejected');

if (failures.length > 0) {
    for (const { r, dir } of failures) {
        console.error(`[${dir}] ${r.reason?.message ?? r.reason}`);
    }
    process.exit(1);
}
-8
View File
@@ -1,8 +0,0 @@
#~!/bin/bash
# iterate through each folder in extensions/ if they contain a package.json, run npm install
for d in ./extensions/*/ ; do
if [ -f "$d/package.json" ]; then
echo "Installing dependencies for $d"
(cd "$d" && npm install)
fi
done