selfhost: fullstack self host p1 (#2902)

* wip: self hosted full setup

* wip: self host modes

* more wip self host stuff

* wip: full release

* fix: custom apps

* fix: readme

* full-stack md

* docs update

* fix

* fix: commands

* remove comments

* fix: docs and migs

* mysql mig

* fix: docker changes

* fix: mysql checks

* fix: mysql mig

* fix: docker more

* fix: redis conn issues
This commit is contained in:
Daniel Salazar
2026-05-04 15:58:03 -07:00
committed by GitHub
parent 8642a5b58c
commit 2de8807942
74 changed files with 3157 additions and 153 deletions
+36
View File
@@ -1,4 +1,40 @@
.dockerignore
Dockerfile
docker-compose.yml
# Dev / build artifacts (recreated inside the build stage)
node_modules
dist
src/gui/dist
src/puter-js/dist
*.tsbuildinfo
# Local runtime data
volatile
config.json
config.dev.json
/puter
# OS / editor
.DS_Store
.vscode
.idea
# Git / CI
.git
.github
# Logs
*.log
npm-debug.log*
.npm
# Tests / coverage
coverage
.nyc_output
# Secrets
.env
.env.*
creds*
*.pem
+18
View File
@@ -0,0 +1,18 @@
# Copy this file to `.env`, fill in the secrets, and `docker compose -f
# docker-compose.full.yml up -d`. None of the defaults below are safe for
# anything beyond a local laptop test.
# ── Public-facing ports (nginx) ---------------------------------------
HTTP_PORT=80
# HTTPS_PORT=443 # uncomment after you enable TLS in nginx/nginx.conf
# ── MariaDB ------------------------------------------------------------
MARIADB_ROOT_PASSWORD=replace-with-strong-password
MARIADB_DATABASE=puter
MARIADB_USER=puter
MARIADB_PASSWORD=replace-with-strong-password
# ── S3 (RustFS) --------------------------------------------------------
S3_ACCESS_KEY=puter
S3_SECRET_KEY=replace-with-strong-secret
S3_BUCKET=puter-local
+8
View File
@@ -59,9 +59,17 @@ jobs:
uses: docker/metadata-action@v5
with:
images: "${{ env.REGISTRY }}/${{ env.IMAGE_NAME }}"
# Tag plan:
# * version tag (vX.Y.Z) push → 1.2.3, 1.2, latest
# * branch push (main) → main
# selfhosted/docker.md tells users to pull `:latest`, which only
# resolves for tag pushes — never main, so unstable code can't
# claim `:latest`.
tags: |
type=semver,pattern={{version}}
type=semver,pattern={{major}}.{{minor}}
type=ref,event=branch
type=raw,value=latest,enable=${{ startsWith(github.ref, 'refs/tags/') }}
# This step uses the `docker/build-push-action` action to build the
# image, based on your repository's `Dockerfile`. If the build succeeds,
+67 -70
View File
@@ -1,91 +1,88 @@
# /!\ NOTICE /!\
# syntax=docker/dockerfile:1.7
#
# OSS Puter image — multi-arch (linux/amd64, linux/arm64).
#
# Build & push:
# docker buildx build --platform linux/amd64,linux/arm64 \
# -t ghcr.io/heyputer/puter:latest --push .
#
# Local single-arch build:
# docker build -t puter .
#
# Self-hosters inject configuration by mounting a config.json at
# /etc/puter/config.json. It is deep-merged over the bundled
# config.default.json, so partial overrides work. Absent file = defaults.
# Many of the developers DO NOT USE the Dockerfile or image.
# While we do test new changes to Docker configuration, it's
# possible that future changes to the repo might break it.
# When changing this file, please try to make it as resilient
# to such changes as possible; developers shouldn't need to
# worry about Docker unless the build/run process changes.
# ---- Build stage ----
FROM node:24-slim AS build
# Build stage
FROM node:24-alpine AS build
WORKDIR /opt/puter
# Install build dependencies
RUN apk add --no-cache git python3 make g++ \
&& ln -sf /usr/bin/python3 /usr/bin/python
# Build toolchain needed for native deps (bcrypt, sharp, better-sqlite3, …).
RUN apt-get update && \
apt-get install -y --no-install-recommends python3 make g++ git && \
rm -rf /var/lib/apt/lists/*
# Set up working directory
WORKDIR /app
ENV HUSKY=0
ENV npm_config_fund=false
ENV npm_config_audit=false
# Copy package.json and package-lock.json
# ---- Dependency layer ---------------------------------------------------
# Copy ONLY package manifests + lockfile first so the npm-install layer
# stays cached when only source files change.
COPY package.json package-lock.json ./
COPY src/backend/package.json src/backend/
COPY src/gui/package.json src/gui/
COPY src/puter-js/package.json src/puter-js/package-lock.json src/puter-js/
COPY src/worker/package.json src/worker/
COPY src/docs/package.json src/docs/
# Fail early if lockfile or manifest is missing
RUN test -f package.json && test -f package-lock.json
# extensionSetup.mjs runs as the postinstall hook during npm ci. (No-ops
# unless any packages/puter/extensions/* gain a package.json.)
COPY tools/extensionSetup.mjs tools/extensionSetup.mjs
# Copy the source files
RUN --mount=type=cache,target=/root/.npm \
npm ci
# ---- Source layer -------------------------------------------------------
COPY . .
# Install mocha
RUN npm i -g npm@latest
RUN npm install -g mocha
# Compile backend TS, then build GUI + puter-js webpack bundles in
# parallel. The GUI/puter-js bundles are how /dist/bundle.min.{js,css}
# and /sdk/puter.js fall back to local assets when the kernel-config
# CDN keys are unset.
RUN npm run build:ts
RUN set -e; \
(cd src/gui && node ./build.js) & gui_pid=$!; \
(cd src/puter-js && npm run build) & pjs_pid=$!; \
wait $gui_pid; \
wait $pjs_pid
# Install node modules
RUN npm cache clean --force && \
for i in 1 2 3; do \
npm ci && break || \
if [ $i -lt 3 ]; then \
sleep 15; \
else \
LOG_DIR="$(npm config get cache | tr -d '\"')/_logs"; \
echo "npm install failed; dumping logs from $LOG_DIR"; \
if [ -d "$LOG_DIR" ]; then \
ls -al "$LOG_DIR" || true; \
cat "$LOG_DIR"/* || true; \
else \
echo "Log directory not found (npm cache: $(npm config get cache))"; \
fi; \
exit 1; \
fi; \
done
# ---- Runtime stage (slim — no build tools) ----
FROM node:24-slim
# Run the build command if necessary
RUN cd src/gui && npm run build && cd -
WORKDIR /opt/puter
# Production stage
FROM node:24-alpine
# git: runtime version probe. wget: HEALTHCHECK.
RUN apt-get update && \
apt-get install -y --no-install-recommends git wget && \
rm -rf /var/lib/apt/lists/*
# Set labels
LABEL repo="https://github.com/HeyPuter/puter"
LABEL license="AGPL-3.0,https://github.com/HeyPuter/puter/blob/master/LICENSE.txt"
LABEL version="1.2.46-beta-1"
COPY --from=build --chown=node:node /opt/puter .
# Install git (required by Puter to check version)
RUN apk add --no-cache git
RUN mkdir -p /etc/puter /var/puter && \
chown -R node:node /etc/puter /var/puter
# Set up working directory
RUN mkdir -p /opt/puter/app
WORKDIR /opt/puter/app
# Copy built artifacts and necessary files from the build stage
COPY --from=build /app/src/gui/dist ./dist
COPY --from=build /app/node_modules ./node_modules
COPY . .
# Set permissions
RUN chown -R node:node /opt/puter/app
USER node
# Self-hosters mount their override at this exact path. The v2 loader
# deep-merges it over config.default.json (see backend/index.ts).
ENV PUTER_CONFIG_PATH=/etc/puter/config.json
ENV NODE_OPTIONS=--enable-source-maps
EXPOSE 4100
HEALTHCHECK --interval=30s --timeout=3s \
USER node
HEALTHCHECK --interval=30s --timeout=3s --start-period=30s --retries=3 \
CMD wget --no-verbose --tries=1 --spider http://puter.localhost:4100/test || exit 1
ENV NO_VAR_RUNTUME=1
ENV NODE_OPTIONS=--enable-source-maps
# Attempt to fix `lru-cache@11.0.2` missing after build stage
# by doing a redundant `npm install` at this stage
RUN npm install
CMD ["npm", "start"]
CMD ["node", "-r", "./dist/src/backend/telemetry.js", "./dist/src/backend/index.js"]
+217
View File
@@ -0,0 +1,217 @@
---
# Self-hosted Puter — full stack.
#
# Brings up Puter + every external service it needs:
# - nginx : reverse proxy (mirrors prod ALB; handles TLS + Host fan-out)
# - valkey : redis-compatible cache / rate-limiter backend
# - mariadb : SQL database (Puter applies its schema on first boot)
# - dynamo : DynamoDB-local (KV store; Puter creates the table itself)
# - s3 : RustFS — S3-compatible object storage
# - s3-init : one-shot init container that creates the bucket
# - puter : the application
#
# Quick start:
# 1. Copy .env.example to .env (or set the variables in your shell).
# 2. Drop a config.json into ./puter/config/ — see selfhosted/full-stack.md
# for the example that pairs with this compose.
# 3. docker compose -f docker-compose.full.yml up -d
#
# Production:
# - Always replace the default passwords / S3 keys / Puter secrets.
# - Front Puter with TLS-terminating reverse proxy (Caddy / nginx).
# - Move state-bearing volumes to a backed-up location.
services:
valkey:
image: valkey/valkey:8-alpine
container_name: puter-valkey
restart: unless-stopped
# Run as a single-node cluster so Puter's ioredis Cluster client
# (the only mode it speaks) can connect. On first boot we assign all
# 16384 slots to ourselves; subsequent boots find them already in
# nodes.conf and skip. `cluster-require-full-coverage no` keeps reads
# working if we ever land partial slots.
command:
- sh
- -c
- |
valkey-server \
--port 6379 \
--cluster-enabled yes \
--cluster-config-file /data/nodes.conf \
--cluster-node-timeout 5000 \
--cluster-require-full-coverage no \
--cluster-announce-ip valkey \
--cluster-announce-port 6379 \
--cluster-announce-bus-port 16379 \
--appendonly yes \
--save "60 1" &
SERVER_PID=$$!
until valkey-cli -p 6379 PING > /dev/null 2>&1; do sleep 0.5; done
if ! valkey-cli -p 6379 CLUSTER NODES | grep -q '0-16383'; then
valkey-cli -p 6379 CLUSTER ADDSLOTSRANGE 0 16383
fi
wait $$SERVER_PID
volumes:
- ./puter/data/valkey:/data
healthcheck:
test:
["CMD-SHELL", "valkey-cli -p 6379 cluster info | grep -q cluster_state:ok"]
interval: 5s
timeout: 3s
retries: 20
start_period: 10s
mariadb:
image: mariadb:11
container_name: puter-mariadb
restart: unless-stopped
environment:
MARIADB_ROOT_PASSWORD: ${MARIADB_ROOT_PASSWORD:-root-change-me}
MARIADB_DATABASE: ${MARIADB_DATABASE:-puter}
MARIADB_USER: ${MARIADB_USER:-puter}
MARIADB_PASSWORD: ${MARIADB_PASSWORD:-puter-change-me}
volumes:
- ./puter/data/mariadb:/var/lib/mysql
healthcheck:
# `healthcheck.sh` ships with the mariadb image; --connect verifies
# the server is accepting auth, not just listening on the socket.
test: ["CMD", "healthcheck.sh", "--connect", "--innodb_initialized"]
interval: 5s
timeout: 5s
retries: 20
start_period: 30s
dynamo:
# Puter creates the `store-kv-v1` table itself on startup
# (config.dynamo.bootstrapTables = true does the work).
image: amazon/dynamodb-local:latest
container_name: puter-dynamo
restart: unless-stopped
user: "1000:1000"
working_dir: /home/dynamodblocal
command:
- "-jar"
- "DynamoDBLocal.jar"
- "-sharedDb"
- "-dbPath"
- "/home/dynamodblocal/data"
volumes:
- ./puter/data/dynamo:/home/dynamodblocal/data
s3:
# RustFS — S3-compatible object storage. Drop-in alternative:
# MinIO (image: minio/minio, command: ["server", "/data", "--console-address", ":9001"]).
image: rustfs/rustfs:latest
container_name: puter-s3
restart: unless-stopped
environment:
RUSTFS_ACCESS_KEY: ${S3_ACCESS_KEY:-puter}
RUSTFS_SECRET_KEY: ${S3_SECRET_KEY:-puter-secret-change-me}
volumes:
- ./puter/data/s3:/data
healthcheck:
# RustFS exposes /health on the S3 port. Use wget (curl is not in
# the slim image).
test:
[
"CMD-SHELL",
"wget -qO- --tries=1 --timeout=2 http://localhost:9000/health || exit 1",
]
interval: 5s
timeout: 3s
retries: 20
start_period: 5s
s3-init:
# One-shot container that creates the `puter-local` bucket on first
# boot. Exits 0 once the bucket exists; stays exited 0 thereafter.
image: amazon/aws-cli:latest
container_name: puter-s3-init
depends_on:
s3:
condition: service_healthy
environment:
AWS_ACCESS_KEY_ID: ${S3_ACCESS_KEY:-puter}
AWS_SECRET_ACCESS_KEY: ${S3_SECRET_KEY:-puter-secret-change-me}
AWS_DEFAULT_REGION: us-east-1
entrypoint:
- /bin/sh
- -c
- |
set -e
endpoint=http://s3:9000
bucket=${S3_BUCKET:-puter-local}
if aws --endpoint-url "$$endpoint" s3api head-bucket --bucket "$$bucket" 2>/dev/null; then
echo "bucket $$bucket already exists"
else
echo "creating bucket $$bucket"
aws --endpoint-url "$$endpoint" s3 mb "s3://$$bucket"
fi
restart: "no"
puter:
# image: ghcr.io/heyputer/puter:latest
pull_policy: always
# Uncomment to build from this directory instead of pulling the published
# image. Also flip pull_policy to `never` so compose doesn't overwrite
# your local build by re-pulling :latest.
build:
context: .
# buildx-only: cross-compile to both archs in a single push
platforms:
# - linux/amd64
- linux/arm64
container_name: puter
restart: unless-stopped
depends_on:
valkey:
condition: service_healthy
mariadb:
condition: service_healthy
dynamo:
condition: service_started
s3-init:
condition: service_completed_successfully
# Internal-only: nginx reaches it on the compose network. Uncomment
# to also expose port 4100 directly on the host (useful for debugging).
# ports:
# - "4100:4100"
expose:
- "4100"
environment:
PUID: 1000
PGID: 1000
volumes:
# Drop your config.json here — see selfhosted/full-stack.md.
- ./puter/config:/etc/puter
# Persistent runtime data (anything your config points at /var/puter).
- ./puter/data/puter:/var/puter
healthcheck:
test: wget --no-verbose --tries=1 --spider http://puter.localhost:4100/test || exit 1
interval: 30s
timeout: 3s
retries: 3
start_period: 30s
nginx:
image: nginx:1.27-alpine
container_name: puter-nginx
restart: unless-stopped
depends_on:
puter:
condition: service_started
ports:
- "${HTTP_PORT:-80}:80"
# Uncomment when you enable TLS in nginx/nginx.conf:
# - "${HTTPS_PORT:-443}:443"
volumes:
- ./nginx/nginx.conf:/etc/nginx/nginx.conf:ro
# TLS certs (fullchain.pem + privkey.pem). Read-only inside.
- ./puter/tls:/etc/nginx/tls:ro
healthcheck:
test: ["CMD-SHELL", "wget -qO- --tries=1 --timeout=2 http://localhost/ || exit 1"]
interval: 10s
timeout: 3s
retries: 5
start_period: 5s
+12 -3
View File
@@ -1,21 +1,30 @@
---
version: "3.8"
services:
puter:
container_name: puter
image: ghcr.io/heyputer/puter:latest
pull_policy: always
# build: ./
# Uncomment to build from this directory instead of pulling the published image:
# build:
# context: .
# # buildx-only: cross-compile to both archs in a single push
# # platforms:
# # - linux/amd64
# # - linux/arm64
restart: unless-stopped
ports:
- '4100:4100'
environment:
# TZ: Europe/Paris
# CONFIG_PATH: /etc/puter
PUID: 1000
PGID: 1000
volumes:
# Drop your config.json into ./puter/config/. It is deep-merged over
# config.default.json — only override what you care to change.
# Image expects /etc/puter/config.json (see PUTER_CONFIG_PATH in Dockerfile).
- ./puter/config:/etc/puter
# Persistent runtime data (sqlite db, uploads, etc. — depends on your
# config). Maps to volatile/ inside the container by default.
- ./puter/data:/var/puter
healthcheck:
test: wget --no-verbose --tries=1 --spider http://puter.localhost:4100/test || exit 1
+85
View File
@@ -0,0 +1,85 @@
# Reverse proxy in front of Puter — mirrors what the prod ALB does:
# accepts every Host header, forwards to the Puter container, and lets
# the Puter app handle subdomain-based routing internally (api.*,
# site.*, app.*, etc).
#
# To enable TLS:
# 1. Drop your fullchain.pem + privkey.pem into ./puter/tls/.
# 2. Uncomment the 443 server{} block below.
# 3. Update server_name to your domain (and wildcard subdomains).
worker_processes auto;
events {
worker_connections 4096;
}
http {
# Required for Puter's WebSocket / socket.io upgrades.
map $http_upgrade $connection_upgrade {
default upgrade;
'' close;
}
# Rough size cap that mirrors prod ALB defaults; tune for your
# uploads. Puter chunks large uploads, so 1 GiB per request is plenty.
client_max_body_size 1024m;
proxy_read_timeout 600s;
proxy_send_timeout 600s;
proxy_buffering off;
server_tokens off;
upstream puter_backend {
server puter:4100;
keepalive 32;
}
# ── HTTP (port 80) — catches all hostnames ─────────────────────
server {
listen 80 default_server;
listen [::]:80 default_server;
server_name _;
# Note: when you enable TLS, replace this block with a redirect:
# return 301 https://$host$request_uri;
location / {
proxy_pass http://puter_backend;
proxy_http_version 1.1;
proxy_set_header Host $host;
proxy_set_header X-Real-IP $remote_addr;
proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
proxy_set_header X-Forwarded-Proto $scheme;
proxy_set_header X-Forwarded-Host $host;
proxy_set_header X-Forwarded-Port $server_port;
proxy_set_header Upgrade $http_upgrade;
proxy_set_header Connection $connection_upgrade;
}
}
# ── HTTPS (port 443) — uncomment after dropping certs in ./puter/tls/ ─
# server {
# listen 443 ssl default_server;
# listen [::]:443 ssl default_server;
# http2 on;
# server_name _;
#
# ssl_certificate /etc/nginx/tls/fullchain.pem;
# ssl_certificate_key /etc/nginx/tls/privkey.pem;
# ssl_protocols TLSv1.2 TLSv1.3;
# ssl_ciphers HIGH:!aNULL:!MD5;
# ssl_session_cache shared:SSL:10m;
# ssl_session_timeout 10m;
#
# location / {
# proxy_pass http://puter_backend;
# proxy_http_version 1.1;
# proxy_set_header Host $host;
# proxy_set_header X-Real-IP $remote_addr;
# proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
# proxy_set_header X-Forwarded-Proto $scheme;
# proxy_set_header X-Forwarded-Host $host;
# proxy_set_header X-Forwarded-Port $server_port;
# proxy_set_header Upgrade $http_upgrade;
# proxy_set_header Connection $connection_upgrade;
# }
# }
}
+23 -28
View File
@@ -452,7 +452,6 @@
"resolved": "https://registry.npmjs.org/@aws-sdk/client-dynamodb/-/client-dynamodb-3.1032.0.tgz",
"integrity": "sha512-kkXiZBNdWCQAg/8opqAu10TxzdpqMkcGrNAT2ScdfWhCpzYZ2pmSpP8W7BOlA32jYIWnYrEdb808UZsNWYBPAA==",
"license": "Apache-2.0",
"peer": true,
"dependencies": {
"@aws-crypto/sha256-browser": "5.2.0",
"@aws-crypto/sha256-js": "5.2.0",
@@ -1748,7 +1747,6 @@
}
],
"license": "MIT",
"peer": true,
"engines": {
"node": ">=20.19.0"
},
@@ -1797,7 +1795,6 @@
}
],
"license": "MIT",
"peer": true,
"engines": {
"node": ">=20.19.0"
}
@@ -1812,6 +1809,29 @@
"node": ">=10.0.0"
}
},
"node_modules/@emnapi/core": {
"version": "1.10.0",
"resolved": "https://registry.npmjs.org/@emnapi/core/-/core-1.10.0.tgz",
"integrity": "sha512-yq6OkJ4p82CAfPl0u9mQebQHKPJkY7WrIuk205cTYnYe+k2Z8YBh11FrbRG/H6ihirqcacOgl2BIO8oyMQLeXw==",
"dev": true,
"license": "MIT",
"optional": true,
"peer": true,
"dependencies": {
"@emnapi/wasi-threads": "1.2.1",
"tslib": "^2.4.0"
}
},
"node_modules/@emnapi/runtime": {
"version": "1.10.0",
"resolved": "https://registry.npmjs.org/@emnapi/runtime/-/runtime-1.10.0.tgz",
"integrity": "sha512-ewvYlk86xUoGI0zQRNq/mC+16R1QeDlKQy21Ki3oSYXNgLb45GV1P6A0M+/s6nyCuNDqe5VpaY84BzXGwVbwFA==",
"license": "MIT",
"optional": true,
"dependencies": {
"tslib": "^2.4.0"
}
},
"node_modules/@emnapi/wasi-threads": {
"version": "1.2.1",
"resolved": "https://registry.npmjs.org/@emnapi/wasi-threads/-/wasi-threads-1.2.1.tgz",
@@ -3073,7 +3093,6 @@
"resolved": "https://registry.npmjs.org/@opentelemetry/api/-/api-1.9.1.tgz",
"integrity": "sha512-gLyJlPHPZYdAk1JENA9LeHejZe1Ti77/pTeFm/nMXmQH/HFZlcS/O2XJB+L8fkbrNSqhdtlvjBVjxwUYanNH5Q==",
"license": "Apache-2.0",
"peer": true,
"engines": {
"node": ">=8.0.0"
}
@@ -6807,7 +6826,6 @@
"integrity": "sha512-FXx2pKgId/WyYo2jXw63kk7/+TY7u7AziEJxJAnSFzHlqTAS3Ync6SvgYAN/k4/PQpnnVuzoMuVnByKK2qp0ag==",
"dev": true,
"license": "MIT",
"peer": true,
"dependencies": {
"@types/estree": "*",
"@types/json-schema": "*"
@@ -6952,7 +6970,6 @@
"resolved": "https://registry.npmjs.org/@types/node/-/node-24.12.2.tgz",
"integrity": "sha512-A1sre26ke7HDIuY/M23nd9gfB+nrmhtYyMINbjI1zHJxYteKR6qSMX56FsmjMcDb3SMcjJg5BiRRgOCC/yBD0g==",
"license": "MIT",
"peer": true,
"dependencies": {
"undici-types": "~7.16.0"
}
@@ -7094,7 +7111,6 @@
"integrity": "sha512-/Zb/xaIDfxeJnvishjGdcR4jmr7S+bda8PKNhRGdljDM+elXhlvN0FyPSsMnLmJUrVG9aPO6dof80wjMawsASg==",
"dev": true,
"license": "MIT",
"peer": true,
"dependencies": {
"@typescript-eslint/scope-manager": "8.58.2",
"@typescript-eslint/types": "8.58.2",
@@ -7380,7 +7396,6 @@
"integrity": "sha512-x7FptB5oDruxNPDNY2+S8tCh0pcq7ymCe1gTHcsp733jYjrJl8V1gMUlVysuCD9Kz46Xz9t1akkv08dPcYDs1w==",
"dev": true,
"license": "MIT",
"peer": true,
"dependencies": {
"@bcoe/v8-coverage": "^1.0.2",
"@vitest/utils": "4.1.4",
@@ -7520,7 +7535,6 @@
"integrity": "sha512-EgFR7nlj5iTDYZYCvavjFokNYwr3c3ry0sFiCg+N7B233Nwp+NNx7eoF/XvMWDCKY71xXAG3kFkt97ZHBJVL8A==",
"dev": true,
"license": "MIT",
"peer": true,
"dependencies": {
"@vitest/utils": "4.1.4",
"fflate": "^0.8.2",
@@ -7858,7 +7872,6 @@
"resolved": "https://registry.npmjs.org/acorn/-/acorn-8.16.0.tgz",
"integrity": "sha512-UVJyE9MttOsBQIDKw1skb9nAwQuR5wuGD3+82K6JgJlm/Y+KI92oNsMNGZCYdDsVtRHSak0pcV5Dno5+4jh9sw==",
"license": "MIT",
"peer": true,
"bin": {
"acorn": "bin/acorn"
},
@@ -8478,7 +8491,6 @@
}
],
"license": "MIT",
"peer": true,
"dependencies": {
"baseline-browser-mapping": "^2.10.12",
"caniuse-lite": "^1.0.30001782",
@@ -8660,7 +8672,6 @@
"resolved": "https://registry.npmjs.org/chai/-/chai-4.5.0.tgz",
"integrity": "sha512-RITGBfijLkBddZvnn8jdqoTypxvqbOLYQkGGxXzeFjVHvudaPw0HNFD9x928/eUwYWd2dPCugVqspGALTZZQKw==",
"license": "MIT",
"peer": true,
"dependencies": {
"assertion-error": "^1.1.0",
"check-error": "^1.0.3",
@@ -9251,7 +9262,6 @@
}
],
"license": "MIT",
"peer": true,
"engines": {
"node": ">=18"
},
@@ -9274,7 +9284,6 @@
}
],
"license": "MIT",
"peer": true,
"engines": {
"node": ">=18"
}
@@ -9950,7 +9959,6 @@
"integrity": "sha512-XoMjdBOwe/esVgEvLmNsD3IRHkm7fbKIUGvrleloJXUZgDHig2IPWNniv+GwjyJXzuNqVjlr5+4yVUZjycJwfQ==",
"dev": true,
"license": "MIT",
"peer": true,
"dependencies": {
"@eslint-community/eslint-utils": "^4.8.0",
"@eslint-community/regexpp": "^4.12.1",
@@ -10011,7 +10019,6 @@
"integrity": "sha512-82GZUjRS0p/jganf6q1rEO25VSoHH0hKPCTrgillPjdI/3bgBhAE1QzHrHTizjpRvy6pGAvKjDJtk2pF9NDq8w==",
"dev": true,
"license": "MIT",
"peer": true,
"bin": {
"eslint-config-prettier": "bin/cli.js"
},
@@ -10645,7 +10652,6 @@
"resolved": "https://registry.npmjs.org/fengari/-/fengari-0.1.5.tgz",
"integrity": "sha512-0DS4Nn4rV8qyFlQCpKK8brT61EUtswynrpfFTcgLErcilBIBskSMQ86fO2WVuybr14ywyKdRjv91FiRZwnEuvQ==",
"license": "MIT",
"peer": true,
"dependencies": {
"readline-sync": "^1.4.10",
"sprintf-js": "^1.1.3",
@@ -11908,7 +11914,6 @@
"resolved": "https://registry.npmjs.org/ioredis/-/ioredis-5.10.1.tgz",
"integrity": "sha512-HuEDBTI70aYdx1v6U97SbNx9F1+svQKBDo30o0b9fw055LMepzpOOd0Ccg9Q6tbqmBSJaMuY0fB7yw9/vjBYCA==",
"license": "MIT",
"peer": true,
"dependencies": {
"@ioredis/commands": "1.5.1",
"cluster-key-slot": "^1.1.0",
@@ -14868,7 +14873,6 @@
"integrity": "sha512-7igPTM53cGHMW8xWuVTydi2KO233VFiTNyF5hLJqpilHfmn8C8gPf+PS7dUT64YcXFbiMGZxS9pCSxL/Dxm/Jw==",
"dev": true,
"license": "MIT",
"peer": true,
"bin": {
"prettier": "bin/prettier.cjs"
},
@@ -15622,7 +15626,6 @@
"integrity": "sha512-PlXPeEWMXMZ7sPYOHqmDyCJzcfNrUr3fGNKtezX14ykXOEIvyK81d+qydx89KY5O71FKMPaQ2vBfBFI5NHR63A==",
"dev": true,
"license": "MIT",
"peer": true,
"dependencies": {
"fast-deep-equal": "^3.1.3",
"fast-uri": "^3.0.1",
@@ -16159,7 +16162,6 @@
"resolved": "https://registry.npmjs.org/socket.io-adapter/-/socket.io-adapter-2.5.6.tgz",
"integrity": "sha512-DkkO/dz7MGln0dHn5bmN3pPy+JmywNICWrJqVWiVOyvXjWQFIv9c2h24JrQLLFJ2aQVQf/Cvl1vblnd4r2apLQ==",
"license": "MIT",
"peer": true,
"dependencies": {
"debug": "~4.4.1",
"ws": "~8.18.3"
@@ -16988,7 +16990,6 @@
"integrity": "sha512-jl1vZzPDinLr9eUt3J/t7V6FgNEw9QjvBPdysz9KfQDD41fQrC2Y4vKQdiaUpFT4bXlb1RHhLpp8wtm6M5TgSw==",
"devOptional": true,
"license": "Apache-2.0",
"peer": true,
"bin": {
"tsc": "bin/tsc",
"tsserver": "bin/tsserver"
@@ -17213,7 +17214,6 @@
"integrity": "sha512-t7g7GVRpMXjNpa67HaVWI/8BWtdVIQPCL2WoozXXA7LBGEFK4AkkKkHx2hAQf5x1GZSlcmEDPkVLSGahxnEEZw==",
"dev": true,
"license": "MIT",
"peer": true,
"dependencies": {
"lightningcss": "^1.32.0",
"picomatch": "^4.0.4",
@@ -17409,7 +17409,6 @@
"integrity": "sha512-tFuJqTxKb8AvfyqMfnavXdzfy3h3sWZRWwfluGbkeR7n0HUev+FmNgZ8SDrRBTVrVCjgH5cA21qGbCffMNtWvg==",
"dev": true,
"license": "MIT",
"peer": true,
"dependencies": {
"@vitest/expect": "4.1.4",
"@vitest/mocker": "4.1.4",
@@ -17541,7 +17540,6 @@
"integrity": "sha512-wGN3qcrBQIFmQ/c0AiOAQBvrZ5lmY8vbbMv4Mxfgzqd/B6+9pXtLo73WuS1dSGXM5QYY3hZnIbvx+K1xxe6FyA==",
"dev": true,
"license": "MIT",
"peer": true,
"dependencies": {
"@types/eslint-scope": "^3.7.7",
"@types/estree": "^1.0.8",
@@ -17590,7 +17588,6 @@
"integrity": "sha512-pIDJHIEI9LR0yxHXQ+Qh95k2EvXpWzZ5l+d+jIo+RdSm9MiHfzazIxwwni/p7+x4eJZuvG1AJwgC4TNQ7NRgsg==",
"dev": true,
"license": "MIT",
"peer": true,
"dependencies": {
"@discoveryjs/json-ext": "^0.5.0",
"@webpack-cli/configtest": "^2.1.1",
@@ -17962,7 +17959,6 @@
"integrity": "sha512-AvbaCLOO2Otw/lW5bmh9d/WEdcDFdQp2Z2ZUH3pX9U2ihyUY0nvLv7J6TrWowklRGPYbB/IuIMfYgxaCPg5Bpg==",
"dev": true,
"license": "ISC",
"peer": true,
"bin": {
"yaml": "bin.mjs"
},
@@ -18029,7 +18025,6 @@
"resolved": "https://registry.npmjs.org/zod/-/zod-4.3.6.tgz",
"integrity": "sha512-rftlrkhHZOcjDwkGlnUtZZkvaPHCsDATp4pGpuOOMDaTdDDXF91wuVDJoWoPsKX/3YPQ5fHuF3STjcYyKr+Qhg==",
"license": "MIT",
"peer": true,
"funding": {
"url": "https://github.com/sponsors/colinhacks"
}
+1 -1
View File
@@ -54,7 +54,7 @@
"check-translations": "node tools/check-translations.js",
"prepare": "husky",
"build:ts": "tsc -p tsconfig.json && node ./tools/write-dist-package-json.mjs",
"postinstall": "./tools/extensionSetup.sh"
"postinstall": "node ./tools/extensionSetup.mjs"
},
"workspaces": [
"src/*",
+25
View File
@@ -0,0 +1,25 @@
# Self-hosting Puter
Three supported ways to run Puter, in increasing order of effort and capability. Pick one, follow that page, ignore the others.
| Mode | Best for | External services |
| ------------------------------------------ | ----------------------------------------------------- | ----------------------------------------------------------- |
| [**1. Dev (npm start)**](./npm.md) | Hacking on the source / trying it on your laptop | None — everything runs in-process |
| [**2. Docker (single container)**](./docker.md) | Production single-host; bring your own DB / S3 | None bundled — point at services you already run |
| [**3. Full self-hosted stack**](./full-stack.md) | Production with a self-managed stack | Bundled: MariaDB, Valkey, DynamoDB-local, RustFS S3, nginx |
---
## 1. Dev (`npm start`) → [npm.md](./npm.md)
Clone, `npm install`, `npm start`. Backend, GUI, and `puter.js` run from the source tree on Node 24+. SQLite + in-process S3 / DynamoDB / Redis stand-ins start automatically — no external services needed. Best for contributing or kicking the tires.
**Not safe to expose publicly** — uses dev secrets and an in-process key store.
## 2. Docker (single container) → [docker.md](./docker.md)
One `docker run` against `ghcr.io/heyputer/puter:latest`. Out of the box uses the same in-process defaults as dev mode; drop a `config.json` into the mounted `/etc/puter/` to point at real services (MariaDB, S3, DynamoDB, Redis) one block at a time. Best when you already operate the dependencies you want Puter to use.
## 3. Full self-hosted stack → [full-stack.md](./full-stack.md)
`docker compose -f docker-compose.full.yml up -d` brings up Puter **plus every external service it needs** (MariaDB, Valkey, DynamoDB-local, RustFS S3, nginx) wired together. Closest to production you can run on a single host; supports your own domain and TLS. Best when you want a public Puter and don't already run the dependencies.
+180
View File
@@ -0,0 +1,180 @@
# 2. Docker (single container)
One Puter container. You bring your own database, S3, etc. — or run with the bundled in-process defaults for a quick spin. The image is multi-arch (`linux/amd64`, `linux/arm64`).
## Requirements
- **Docker** (any recent version).
## Bare minimum — defaults, single command
```bash
mkdir -p puter/config puter/data
docker run -d \
--name puter \
--restart unless-stopped \
-p 4100:4100 \
-v $(pwd)/puter/config:/etc/puter \
-v $(pwd)/puter/data:/var/puter \
ghcr.io/heyputer/puter:latest
```
Open <http://puter.localhost:4100>. With nothing in `puter/config/`, the in-process defaults kick in (same SQLite + dynalite + fauxqs + redis-mock as dev mode). State lands in `puter/data/`. Login is `admin` — temp password is printed once in `docker logs puter`.
That's enough to confirm the image works. Now configure for real.
## Add a config
The container reads **`/etc/puter/config.json`** and deep-merges it on top of the bundled defaults. You only put the keys you want to change.
```bash
cat > puter/config/config.json <<'JSON'
{
"domain": "puter.example.com",
"protocol": "https",
"pub_port": 443,
"env": "prod",
"jwt_secret": "REPLACE-WITH-openssl-rand-hex-64",
"url_signature_secret": "REPLACE-WITH-A-DIFFERENT-openssl-rand-hex-64"
}
JSON
docker restart puter
```
> 🔒 **Always replace `jwt_secret` and `url_signature_secret`.** The defaults are baked into the public image. Generate with `openssl rand -hex 64`.
Watch the logs:
```bash
docker logs -f puter
```
Look for `[config] override from /etc/puter/config.json` — that's the success signal.
## Wire to external services
Drop the relevant block(s) into `config.json`. Mix and match. Restart with `docker restart puter` after any change.
### MySQL / MariaDB
Puter applies its schema on first boot when you set `migrationPaths`:
```json
{
"database": {
"engine": "mysql",
"host": "db.internal", "port": 3306,
"user": "puter", "password": "...", "database": "puter",
"migrationPaths": ["/opt/puter/dist/src/backend/clients/database/migrations/mysql"]
}
}
```
Two files run in order: `mysql_mig_1.sql` (tables) and `mysql_mig_2.sql` (default apps — editor, viewer, pdf, camera, player, recorder, git, dev-center, puter-linux). Both are idempotent — safe to re-run.
### S3 (real or S3-compatible)
```json
{
"s3": {
"s3Config": {
"endpoint": "https://s3.example.com",
"accessKeyId": "...", "secretAccessKey": "...",
"region": "us-east-1"
}
},
"s3_bucket": "my-puter-bucket",
"s3_region": "us-east-1"
}
```
The bucket must exist already — Puter doesn't create it.
> ⚠️ **S3 uses camelCase keys** (`accessKeyId` / `secretAccessKey`). DynamoDB below uses snake_case. They're not the same.
### DynamoDB (real AWS)
Provision the table externally (e.g. Terraform):
```json
{
"dynamo": {
"aws": { "access_key": "...", "secret_key": "...", "region": "us-east-1" }
}
}
```
The KV table is named `store-kv-v1`. Schema: hash `namespace` (S), range `key` (S), LSI `lsi1-index` on `lsi1` (S), TTL on `ttl`.
### Redis / Valkey cluster
Puter speaks ioredis cluster protocol. Real Redis cluster:
```json
{ "redis": { "startupNodes": [{ "host": "redis-0", "port": 6379 }] } }
```
For a self-hosted single Valkey/Redis container, run it in cluster mode (one node, all 16384 slots assigned to itself) and turn off TLS:
```json
{
"redis": {
"startupNodes": [{ "host": "valkey", "port": 6379 }],
"tls": false
}
}
```
TLS defaults to on (matches the prod ElastiCache shape) — set `false` for plain-TCP self-host.
## What persists?
Anything your config points at `/var/puter/...` lives on the host via the `puter/data` mount (SQLite path, fauxqs data dirs if you use them, etc.). If you've moved every dependency to external services, the data volume is mostly empty and optional.
## Updating
```bash
docker pull ghcr.io/heyputer/puter:latest
docker rm -f puter
# re-run the docker run command above
```
Your `config.json` and persistent data are untouched.
## Building the image yourself
```bash
docker build -t puter .
# Multi-arch (requires buildx, on by default in modern Docker):
docker buildx build \
--platform linux/amd64,linux/arm64 \
-t your-registry/puter:latest \
--push .
```
A `docker-compose.yml` in this directory has a commented-out `build:` block — uncomment it (and flip `pull_policy` to `never`) to build from your local checkout instead of pulling.
## Troubleshooting
**`docker logs puter` shows the container restarting.**
Almost always JSON syntax in `config.json`. Validate: `jq . puter/config/config.json`.
**The config file isn't picked up.**
Confirm it resolves to `/etc/puter/config.json` *inside* the container:
```bash
docker exec puter cat /etc/puter/config.json
```
Empty / missing → the volume mount path is wrong.
**Healthcheck reports unhealthy but the site works.**
The healthcheck hits `puter.localhost:4100/test` from inside the container. If you changed `domain` or `port`, the check still uses defaults. The site itself is fine.
**`Error: DynamoDB aws config requires both access_key and secret_key`.**
You wrote `accessKeyId` / `secretAccessKey` (the AWS SDK form) under `dynamo.aws`. DynamoDB config uses snake_case. See above.
**Architecture mismatch on Apple Silicon / ARM hosts.**
Use the published `:latest` tag — it's already multi-arch. If you built locally with `docker build` on an Intel Mac, the resulting image will be `linux/amd64` only.
+243
View File
@@ -0,0 +1,243 @@
# 3. Full self-hosted stack
`docker-compose.full.yml` brings up Puter **plus every external service it needs** — MariaDB, Valkey, DynamoDB-local, RustFS S3, nginx — wired together. Closest thing to a production deployment you can self-manage on a single host.
## Requirements
- **Docker** with the `compose` plugin.
- A **domain** with DNS access — you need a wildcard record (`*.your-domain.com` → server IP). Puter routes by subdomain (`api.<domain>`, `site.<domain>`, `app.<domain>`).
- Optional: **TLS certs** (or `certbot` to grab them — see Step 4).
## What's running
| Container | Image | Role |
| --------------- | ------------------------ | ---------------------------------------------------------- |
| `puter-nginx` | `nginx:1.27-alpine` | Reverse proxy on 80 (and 443 if TLS); forwards to Puter |
| `puter` | `ghcr.io/heyputer/puter` | The app |
| `puter-mariadb` | `mariadb:11` | SQL database — schema applied automatically on first boot |
| `puter-valkey` | `valkey/valkey:8-alpine` | Redis-compatible cache + rate-limiter |
| `puter-dynamo` | `amazon/dynamodb-local` | KV store — table auto-created on first boot |
| `puter-s3` | `rustfs/rustfs` | S3-compatible object storage (MinIO drop-in noted in file) |
| `puter-s3-init` | `amazon/aws-cli` | One-shot — creates the bucket on first boot, then exits |
State lives under `./puter/data/<service>/`.
---
## Step 1 — Create `.env` and `puter/config/config.json`
> ⚠️ **Run this whole block in one shell session.** It generates secrets once and writes them into both `.env` (read by docker compose) and `config.json` (read by Puter). The two files **must** agree on the MariaDB password and the S3 secret — if they drift, MariaDB initialises with one password and Puter tries to log in with another, and you get `ER_ACCESS_DENIED_ERROR`.
```bash
MARIADB_ROOT_PASSWORD=$(openssl rand -hex 32)
MARIADB_PASSWORD=$(openssl rand -hex 32)
S3_SECRET_KEY=$(openssl rand -hex 32)
JWT_SECRET=$(openssl rand -hex 64)
URL_SIGNATURE_SECRET=$(openssl rand -hex 64)
cat > .env <<EOF
HTTP_PORT=80
# HTTPS_PORT=443 # uncomment after enabling TLS in Step 3
MARIADB_ROOT_PASSWORD=$MARIADB_ROOT_PASSWORD
MARIADB_DATABASE=puter
MARIADB_USER=puter
MARIADB_PASSWORD=$MARIADB_PASSWORD
S3_ACCESS_KEY=puter
S3_SECRET_KEY=$S3_SECRET_KEY
S3_BUCKET=puter-local
EOF
mkdir -p puter/config puter/data puter/tls
cat > puter/config/config.json <<EOF
{
"domain": "puter.local",
"protocol": "http",
"pub_port": 80,
"env": "prod",
"static_hosting_domain": "puter.sitelocal",
"static_hosting_domain_alt": "puter.hostlocal",
"private_app_hosting_domain": "puter.applocal",
"private_app_hosting_domain_alt": "puter.devlocal",
"jwt_secret": "$JWT_SECRET",
"url_signature_secret": "$URL_SIGNATURE_SECRET",
"database": {
"engine": "mysql",
"host": "mariadb",
"port": 3306,
"user": "puter",
"password": "$MARIADB_PASSWORD",
"database": "puter",
"migrationPaths": ["/opt/puter/dist/src/backend/clients/database/migrations/mysql"]
},
"redis": {
"startupNodes": [{ "host": "valkey", "port": 6379 }],
"tls": false
},
"dynamo": {
"endpoint": "http://dynamo:8000",
"bootstrapTables": true,
"aws": {
"access_key": "fake",
"secret_key": "fake",
"region": "us-east-1"
}
},
"s3": {
"s3Config": {
"endpoint": "http://s3:9000",
"accessKeyId": "puter",
"secretAccessKey": "$S3_SECRET_KEY",
"region": "us-east-1"
}
},
"s3_bucket": "puter-local",
"s3_region": "us-east-1"
}
EOF
```
Replace `puter.local`, `puter.sitelocal`, `puter.hostlocal`, `puter.applocal` and `puter.devlocal` with your actual domain (or leave it for a localhost-only trial).
Why these knobs:
- `env: "prod"` — the bundled `config.default.json` ships with `env: "dev"` (matches the source-tree `npm run start:gui` workflow, which expects webpack-dev-server emitting a CSS manifest). Self-host runs against pre-built static bundles, so `env: "prod"` makes the homepage emit the `/dist/bundle.min.css` `<link>` tag instead of waiting on a manifest that doesn't exist.
- `database.migrationPaths` — Puter applies the bundled MySQL schema on boot. `mysql_mig_1.sql` (tables) and `mysql_mig_2.sql` (default apps: editor, viewer, pdf, camera, player, recorder, git, dev-center, puter-linux). Idempotent — safe to re-run.
- `dynamo.bootstrapTables: true` — Puter creates its KV table on boot. **Only set against a local emulator**, never real AWS.
- `dynamo.aws` keys are dummies; DynamoDB-local doesn't validate them but the AWS SDK requires _something_. **Note:** DynamoDB uses `access_key` / `secret_key` (snake_case); S3 below uses `accessKeyId` / `secretAccessKey` (camelCase). Not interchangeable.
> If you ever change `MARIADB_PASSWORD` after first boot, `.env` alone won't update MariaDB — its credentials are baked into `./puter/data/mariadb/` on first init. Either rotate the password inside MariaDB by hand or `docker compose down && rm -rf ./puter/data/mariadb` to start fresh.
## Step 2 — Point DNS at the server \[Optional\]
In your DNS provider, add **two records**:
```
A puter.local → <your server's public IP>
A puter.sitelocal → <your server's public IP>
A *.puter.sitelocal → <your server's public IP>
A puter.hostlocal → <your server's public IP>
A *.puter.hostlocal → <your server's public IP>
A puter.applocal → <your server's public IP>
A *.puter.applocal → <your server's public IP>
A puter.devlocal → <your server's public IP>
A *.puter.devlocal → <your server's public IP>
```
The wildcard is required — Puter routes via subdomains.
If you only need these names to resolve locally for testing, you can add entries like this (and any other needed subdomains) to your hosts file:
```
127.0.0.1 puter.local
```
## Step 3 — TLS (recommended for public installs) \[Optional\]
Skip this for a quick local demo. Don't skip it for users typing passwords.
**Get a wildcard cert.** Easiest path with Let's Encrypt + DNS-01 (works for wildcards):
```bash
sudo certbot certonly --manual --preferred-challenges dns \
-d puter.local -d puter.sitelocal -d "*.puter.sitelocal" -d puter.hostlocal -d "*.puter.hostlocal" -d puter.applocal -d "*.puter.applocal" -d puter.devlocal -d "*.puter.devlocal"
```
Drop the resulting `fullchain.pem` and `privkey.pem` into `./puter/tls/`.
**Wire nginx to use them:**
1. Open [nginx/nginx.conf](../nginx/nginx.conf), uncomment the entire `# server { listen 443 ssl … }` block.
2. (Optional) Replace the body of the port-80 block with `return 301 https://$host$request_uri;` to force HTTPS.
3. In [docker-compose.full.yml](../docker-compose.full.yml), uncomment the `443:443` port mapping under the `nginx` service.
4. In `.env`, uncomment `HTTPS_PORT=443`.
5. In `config.json`, switch:
```json
{ "protocol": "https", "pub_port": 443 }
```
## Step 4 — Bring it up
```bash
docker compose -f docker-compose.full.yml up -d
```
First boot takes ~30s while MariaDB initialises and Puter applies the schema + default apps. Watch:
```bash
docker compose -f docker-compose.full.yml logs -f puter
```
Healthy startup:
```
[config] override from /etc/puter/config.json
[mysql] running migrations from /opt/puter/dist/src/backend/clients/database/migrations/mysql: 2 file(s)
[mysql] applied mysql_mig_1.sql (...)
[mysql] applied mysql_mig_2.sql (9 statements)
```
Then open **<https://puter.local>** (or `http://` if you skipped TLS). Login is `admin` — the temp password is printed once in the puter container logs on first boot:
```bash
docker compose -f docker-compose.full.yml logs puter | grep tmp_password
```
Change it in Settings after first login.
## Building from source instead of pulling
If you want to test local Dockerfile changes against the full stack, uncomment the `build:` block in [docker-compose.full.yml](../docker-compose.full.yml) under the `puter` service, change `pull_policy: always` → `pull_policy: never`, then:
```bash
docker compose -f docker-compose.full.yml up -d --build
```
---
## Re-starting backend
```bash
# update
docker compose -f docker-compose.full.yml pull
docker compose -f docker-compose.full.yml up -d
# logs
docker compose -f docker-compose.full.yml logs -f puter
# stop, keep data
docker compose -f docker-compose.full.yml down
# stop, NUKE all state (irreversible)
docker compose -f docker-compose.full.yml down
rm -rf puter/data
```
Migrations re-apply idempotently across pulls. Volumes are preserved.
## Troubleshooting
**Site loads but I get "Bad Gateway" / nginx errors.**
The puter container failed to come up. `docker compose -f docker-compose.full.yml logs puter` will tell you which dependency rejected it (most often DB password mismatch between `.env` and `config.json`).
**Login screen says "admin password not set".**
First-boot temp password is logged once. Find it: `docker compose -f docker-compose.full.yml logs puter | grep "tmp_password"`. After login, change it in Settings.
**Healthcheck reports unhealthy but the site works.**
The healthcheck hits `puter.localhost:4100/test` from inside the container. If you changed `domain` or `port`, the check still uses defaults. The site itself is fine.
**Nothing resolves at `puter.example.com` after DNS changes.**
DNS propagates slowly. `dig puter.example.com` and `dig api.puter.example.com` should both return your server IP. If not, give it 5–60 minutes.
**`docker compose up` hangs at "waiting for service to be healthy".**
`docker compose -f docker-compose.full.yml ps` shows which container is unhealthy. MariaDB takes ~20–30s on a cold boot; everything else under 5s. If something stays unhealthy, `logs <service>` will tell you why.
**`Error: DynamoDB aws config requires both access_key and secret_key`.**
You wrote `accessKeyId` / `secretAccessKey` under `dynamo.aws`. That config block uses snake_case (`access_key` / `secret_key`). Only the `s3.s3Config` block uses camelCase.
+64
View File
@@ -0,0 +1,64 @@
# 1. Dev mode (`npm start`)
Run Puter directly from the source tree on Node. Everything runs in-process — no databases, no Redis, no external services. Best for hacking on Puter or a quick local trial on your LAN.
> ⚠️ **Not safe to expose publicly.** Default JWT secrets ship in the source tree and the in-process key store has no real security boundary.
## Requirements
- **Node.js 24+** (`nvm install 24` if you don't have it).
- **C toolchain** for native deps (`bcrypt`, `sharp`, `better-sqlite3`):
- macOS: `xcode-select --install`
- Debian / Ubuntu: `sudo apt install build-essential python3`
## Setup
```bash
cd packages/puter # if you cloned the heyputer parent repo
# (skip this if you cloned puter directly)
npm install
npm run build # one-time: compiles backend + GUI + puter.js
npm start # daily use: re-builds backend, then starts
```
Open <http://puter.localhost:4100>. Sign in as `admin` — the temp password is printed once in the boot logs.
## What runs in-process
Out of the box (no `config.json`):
- SQLite at `volatile/runtime/puter-database.sqlite` (auto-created).
- In-process S3 (`fauxqs`) with the `puter-local` bucket auto-created.
- In-process DynamoDB (`dynalite`) with its KV table auto-created.
- In-process Redis (`ioredis-mock`).
State lives under `./volatile/`. Delete the folder to reset.
## Configuring (optional)
Drop a `config.json` next to `package.json`. It deep-merges over `config.default.json` — only put what you want to change:
```json
{ "port": 5101, "domain": "myhost.local" }
```
Restart with `npm start`.
For real external services (MySQL, S3, DynamoDB, Redis), the config blocks are the same as in [docker.md → "Wiring to external services"](./docker.md#wire-to-external-services). This mode is meant for in-process defaults, though — if you're wiring real services, you probably want [docker.md](./docker.md) instead.
## Daily workflow
- Backend changes → `npm start` re-runs the TS compile (~5–10s) and restarts.
- GUI / `puter.js` changes → `npm run build` (full webpack — slower).
- Reset state → `rm -rf volatile/` and start over.
## Troubleshooting
**`npm start` says missing `dist/`.**
You skipped `npm run build`. The `prestart` hook only re-builds the backend; the GUI + `puter.js` bundles need the full build at least once.
**Native module build failures during `npm install`.**
Missing C toolchain. Install it (see Requirements), delete `node_modules`, re-run `npm install`.
**Port 4100 already in use.**
Set `"port": <something else>` in `config.json`. The browser URL changes accordingly.
@@ -17,9 +17,12 @@
* along with this program. If not, see <https://www.gnu.org/licenses/>.
*/
import { readdirSync, readFileSync } from 'fs';
import { isAbsolute, resolve as resolvePath } from 'path';
import { createPool, type Pool } from 'mysql2';
import { AbstractDatabaseClient, type WriteResult } from './DatabaseClient';
import { SQLBatcher } from './SQLBatcher.js';
import { splitMysqlStatements } from './splitMysqlStatements.js';
import type { IConfig } from '../../types';
const RETRIABLE_ERROR_CODES = new Set([
@@ -91,6 +94,8 @@ export class MySQLDatabaseClient extends AbstractDatabaseClient {
}
this.dbReplica = new SQLBatcher(this.replicaPool, 10, 5);
await this.runMigrations();
}
override async onServerPrepareShutdown(): Promise<void> {
@@ -213,6 +218,74 @@ export class MySQLDatabaseClient extends AbstractDatabaseClient {
return (primaryResult?.[0] as Record<string, unknown>[]) ?? [];
}
// ------------------------------------------------------------------
// Migrations
// ------------------------------------------------------------------
/**
 * Run the configured SQL migrations against the primary pool.
 *
 * Every directory listed in `config.database.migrationPaths` is scanned
 * for `mysql*.sql` files, which are applied in lexical order. There is
 * no applied-state bookkeeping, so each file MUST be idempotent. Any
 * failure (unreadable directory, statement error) throws and therefore
 * aborts startup, making schema problems immediately visible to the
 * operator.
 */
private async runMigrations(): Promise<void> {
    const migrationDirs = this.config.database?.migrationPaths;
    if (!migrationDirs || migrationDirs.length === 0) return;

    // A single dedicated connection keeps the migration statements off
    // the batching path and releases cleanly even on failure.
    const connection = await this.primaryPool.promise().getConnection();
    try {
        for (const configuredPath of migrationDirs) {
            // Relative paths are resolved against the process CWD.
            const dir = isAbsolute(configuredPath)
                ? configuredPath
                : resolvePath(process.cwd(), configuredPath);

            let sqlFiles: string[];
            try {
                sqlFiles = readdirSync(dir)
                    .filter(
                        (name) =>
                            name.startsWith('mysql') &&
                            name.endsWith('.sql'),
                    )
                    .sort();
            } catch (e) {
                throw new Error(
                    `[mysql] migration path is unreadable: ${dir}`,
                    { cause: e },
                );
            }

            if (sqlFiles.length === 0) {
                console.log(`[mysql] no migrations in ${dir}`);
                continue;
            }
            console.log(
                `[mysql] running migrations from ${dir}: ${sqlFiles.length} file(s)`,
            );

            for (const file of sqlFiles) {
                const contents = readFileSync(
                    resolvePath(dir, file),
                    'utf8',
                );
                // DELIMITER-aware split: one query() call per statement.
                const statements = splitMysqlStatements(contents);
                for (const [index, statement] of statements.entries()) {
                    try {
                        await connection.query(statement);
                    } catch (e) {
                        throw new Error(
                            `[mysql] failed to apply ${file} at statement ${index}`,
                            { cause: e },
                        );
                    }
                }
                console.log(
                    `[mysql] applied ${file} (${statements.length} statements)`,
                );
            }
        }
    } finally {
        connection.release();
    }
}
// ------------------------------------------------------------------
// Pool management
// ------------------------------------------------------------------
+112 -10
View File
@@ -22,6 +22,7 @@ import { metrics } from '@opentelemetry/api';
const DEFAULT_MAX_QUEUE_SIZE = 1000;
const DEFAULT_FAILURE_THRESHOLD = 5;
const DEFAULT_COOLDOWN_MS = 5_000;
const FALLBACK_RETRY_CONCURRENCY = 8;
const meter = metrics.getMeter('puter-backend');
const enqueueDroppedCounter = meter.createCounter(
@@ -38,6 +39,20 @@ const enqueueRejectedCounter = meter.createCounter(
const flushFailureCounter = meter.createCounter('sql_batcher.flush.failed', {
description: 'SQLBatcher flush attempts that threw',
});
// Counts flushes that abandoned the coalesced multi-statement batch and
// re-ran items individually; a rising rate means batches fail regularly.
const fallbackInvocationsCounter = meter.createCounter(
    'sql_batcher.fallback.invocations',
    {
        description:
            'Times SQLBatcher fell back to per-item retry after a batch error',
    },
);
// Counts individual statements that still failed during the per-item
// retry pass (those rejections propagate to the original enqueuers).
const fallbackItemFailuresCounter = meter.createCounter(
    'sql_batcher.fallback.item_failures',
    {
        description:
            'Per-item failures observed during SQLBatcher per-item retry',
    },
);
export class SQLBatcher {
dbPool;
@@ -137,24 +152,111 @@ export class SQLBatcher {
const query = `${batch.map((b) => b.sql.replace(/;+\s*$/, '')).join(';')}; SELECT 1`; // SELECT 1 forces mysql2 to return array
const values = batch.map((b) => b.values ?? []).flat();
let connection;
try {
const [results, fields] = await this.dbPool
.promise()
.query(query, values);
connection = await this.dbPool.promise().getConnection();
} catch (error) {
this.#consecutiveFailures++;
this.#lastFailureAt = Date.now();
flushFailureCounter.add(1);
console.warn(
'SQLBatcher could not acquire connection for flush:',
error,
);
for (const b of batch) {
b.reject(this.#createPublicBatchError());
}
return;
}
// Run the coalesced multi-statement inside an explicit transaction so
// a single bad statement (e.g. a duplicate-key INSERT) rolls back the
// whole batch atomically, leaving us free to re-run each item
// individually below. Without this, MySQL would commit every
// statement up to the failure point and a per-item retry would
// misreport already-committed inserts as duplicate-key failures.
let batchSucceeded = false;
try {
await connection.beginTransaction();
const [results, fields] = await connection.query(query, values);
await connection.commit();
batchSucceeded = true;
this.#consecutiveFailures = 0;
for (let i = 0; i < batch.length; i++) {
const b = batch[i];
b.resolve([results[i], fields?.[i]]);
}
} catch (error) {
this.#consecutiveFailures++;
this.#lastFailureAt = Date.now();
flushFailureCounter.add(1);
console.warn('Error in SQLBatcher flush:', error);
for (const b of batch) {
b.reject(this.#createPublicBatchError());
} catch (batchError) {
try {
await connection.rollback();
} catch (rollbackError) {
console.warn('SQLBatcher rollback failed:', rollbackError);
}
console.warn(
'SQLBatcher batch failed; retrying items individually:',
batchError,
);
} finally {
connection.release();
}
if (batchSucceeded) return;
// Per-item fallback. The transaction was rolled back so no statement
// committed; re-running each item independently produces clean
// success/failure outcomes for each caller. Concurrency is capped to
// avoid briefly saturating the pool when a large batch fails.
flushFailureCounter.add(1);
fallbackInvocationsCounter.add(1);
const settled = new Array(batch.length);
let cursor = 0;
const workers = Array.from(
{ length: Math.min(FALLBACK_RETRY_CONCURRENCY, batch.length) },
async () => {
while (cursor < batch.length) {
const i = cursor++;
const b = batch[i];
try {
settled[i] = {
ok: true,
value: await this.dbPool
.promise()
.query(b.sql, b.values ?? []),
};
} catch (error) {
settled[i] = { ok: false, error };
}
}
},
);
await Promise.all(workers);
let anySucceeded = false;
let failureCount = 0;
for (let i = 0; i < batch.length; i++) {
const b = batch[i];
const r = settled[i];
if (r.ok) {
anySucceeded = true;
b.resolve(r.value);
} else {
failureCount++;
b.reject(r.error);
}
}
if (failureCount > 0) {
fallbackItemFailuresCounter.add(failureCount);
}
// Only escalate the breaker when the database itself looks unhealthy
// (no item got through). Row-level errors like duplicate-key are
// application concerns, not DB outages, and shouldn't trip it.
this.#lastFailureAt = Date.now();
if (anySucceeded) {
this.#consecutiveFailures = 0;
} else {
this.#consecutiveFailures++;
}
}
}
@@ -23,7 +23,7 @@ import { createContext, runInContext } from 'vm';
import { AbstractDatabaseClient, type WriteResult } from './DatabaseClient';
import type { IConfig } from '../../types';
const MIGRATIONS_DIR = resolve(__dirname, './migrations');
const MIGRATIONS_DIR = resolve(__dirname, './migrations/sqlite');
/**
* Ordered list of [threshold_version, files[]] pairs.
File diff suppressed because it is too large Load Diff
File diff suppressed because one or more lines are too long
@@ -19,7 +19,6 @@
DROP TABLE IF EXISTS `monthly_usage_counts`;
DROP TABLE IF EXISTS `access_token_permissions`;
DROP TABLE IF EXISTS `auth_audit`;
DROP TABLE IF EXISTS `general_analytics`;
DROP TABLE IF EXISTS `audit_user_to_app_permissions`;
DROP TABLE IF EXISTS `user_to_app_permissions`;
@@ -343,26 +342,6 @@ CREATE TABLE `general_analytics` (
FOREIGN KEY (`app_id`) REFERENCES `apps` (`id`) ON DELETE SET NULL ON UPDATE CASCADE
);
-- 0014
CREATE TABLE `auth_audit` (
`id` INTEGER PRIMARY KEY,
`created_at` timestamp NOT NULL DEFAULT CURRENT_TIMESTAMP,
`uid` CHAR(40) NOT NULL,
`ip_address` VARCHAR(45) DEFAULT NULL,
`ua_string` VARCHAR(255) DEFAULT NULL,
`action` VARCHAR(40) DEFAULT NULL,
`requester` JSON,
`body` JSON,
`extra` JSON,
`has_parse_error` TINYINT(1) DEFAULT 0
);
-- 0017
CREATE TABLE `access_token_permissions` (
@@ -0,0 +1,137 @@
/**
* Copyright (C) 2024-present Puter Technologies Inc.
*
* This file is part of Puter.
*
* Puter is free software: you can redistribute it and/or modify
* it under the terms of the GNU Affero General Public License as published
* by the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU Affero General Public License for more details.
*
* You should have received a copy of the GNU Affero General Public License
* along with this program. If not, see <https://www.gnu.org/licenses/>.
*/
import { describe, expect, it } from 'vitest';
import { splitMysqlStatements } from './splitMysqlStatements.js';
// Unit tests for splitMysqlStatements — the DELIMITER-aware splitter used
// by the MySQL migration runner. Coverage: basic splitting, quoting and
// escaping rules, comment handling, and DELIMITER directives.
describe('splitMysqlStatements', () => {
    // --- basic splitting ---------------------------------------------
    it('splits simple statements on default delimiter', () => {
        expect(splitMysqlStatements('SELECT 1; SELECT 2;')).toEqual([
            'SELECT 1',
            'SELECT 2',
        ]);
    });
    it('returns empty array for whitespace-only input', () => {
        expect(splitMysqlStatements(' \n\t ')).toEqual([]);
    });
    it('keeps a trailing statement without terminating semicolon', () => {
        expect(splitMysqlStatements('SELECT 1;\nSELECT 2')).toEqual([
            'SELECT 1',
            'SELECT 2',
        ]);
    });
    // --- quoting & escaping ------------------------------------------
    it('ignores semicolons inside single-quoted strings', () => {
        expect(
            splitMysqlStatements("INSERT INTO t VALUES ('a;b'); SELECT 2;"),
        ).toEqual(["INSERT INTO t VALUES ('a;b')", 'SELECT 2']);
    });
    it("handles SQL '' escape inside single-quoted strings", () => {
        expect(
            splitMysqlStatements("SELECT 'it''s; ok'; SELECT 2;"),
        ).toEqual(["SELECT 'it''s; ok'", 'SELECT 2']);
    });
    it('handles backslash escape inside strings', () => {
        expect(
            splitMysqlStatements("SELECT 'a\\'b;c'; SELECT 2;"),
        ).toEqual(["SELECT 'a\\'b;c'", 'SELECT 2']);
    });
    it('ignores semicolons inside backtick identifiers', () => {
        expect(
            splitMysqlStatements('SELECT `weird;col` FROM t; SELECT 2;'),
        ).toEqual(['SELECT `weird;col` FROM t', 'SELECT 2']);
    });
    it('ignores semicolons inside double-quoted strings', () => {
        expect(splitMysqlStatements('SELECT "a;b"; SELECT 2;')).toEqual([
            'SELECT "a;b"',
            'SELECT 2',
        ]);
    });
    // --- comments ----------------------------------------------------
    // Note: comment text stays attached to the statement buffer, so the
    // comment appears in the emitted statement text.
    it('ignores semicolons in line comments', () => {
        expect(
            splitMysqlStatements(
                'SELECT 1; -- a;b\nSELECT 2; # c;d\nSELECT 3;',
            ),
        ).toEqual(['SELECT 1', '-- a;b\nSELECT 2', '# c;d\nSELECT 3']);
    });
    it('ignores semicolons in block comments (multi-line)', () => {
        expect(
            splitMysqlStatements('SELECT 1 /* a;\nb;c */; SELECT 2;'),
        ).toEqual(['SELECT 1 /* a;\nb;c */', 'SELECT 2']);
    });
    // --- DELIMITER directives ----------------------------------------
    it('honours DELIMITER directive', () => {
        const sql = `
SELECT 1;
DELIMITER //
CREATE PROCEDURE p() BEGIN SELECT 1; SELECT 2; END//
DELIMITER ;
SELECT 3;
`;
        expect(splitMysqlStatements(sql)).toEqual([
            'SELECT 1',
            'CREATE PROCEDURE p() BEGIN SELECT 1; SELECT 2; END',
            'SELECT 3',
        ]);
    });
    it('handles a stored procedure that uses // delimiter end-to-end', () => {
        const sql = `DROP PROCEDURE IF EXISTS foo;
DELIMITER //
CREATE PROCEDURE foo(IN x INT)
BEGIN
IF x > 0 THEN
SET @s := 'hi;';
SELECT @s;
END IF;
END//
DELIMITER ;
DROP PROCEDURE IF EXISTS foo;
`;
        const stmts = splitMysqlStatements(sql);
        expect(stmts).toHaveLength(3);
        expect(stmts[0]).toBe('DROP PROCEDURE IF EXISTS foo');
        expect(stmts[1]).toContain('CREATE PROCEDURE foo');
        expect(stmts[1]).toContain("SET @s := 'hi;';");
        expect(stmts[2]).toBe('DROP PROCEDURE IF EXISTS foo');
    });
    it('strips DELIMITER lines from output even if no statement follows', () => {
        expect(splitMysqlStatements('DELIMITER //\nDELIMITER ;\n')).toEqual(
            [],
        );
    });
    it('does not treat -- without trailing whitespace as a comment', () => {
        // `--5` is "minus minus 5" (rare in practice but valid SQL).
        // MySQL requires whitespace after `--` for it to be a comment.
        expect(splitMysqlStatements('SELECT 1--5; SELECT 2;')).toEqual([
            'SELECT 1--5',
            'SELECT 2',
        ]);
    });
});
@@ -0,0 +1,211 @@
/**
* Copyright (C) 2024-present Puter Technologies Inc.
*
* This file is part of Puter.
*
* Puter is free software: you can redistribute it and/or modify
* it under the terms of the GNU Affero General Public License as published
* by the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU Affero General Public License for more details.
*
* You should have received a copy of the GNU Affero General Public License
* along with this program. If not, see <https://www.gnu.org/licenses/>.
*/
/**
 * DELIMITER-aware statement splitter for MySQL dump / migration files.
 *
 * Walks `sql` character by character with a small state machine that
 * understands single-quoted strings, double-quoted strings, backtick
 * identifiers, line comments (`-- `, `#`) and block comments. `DELIMITER X`
 * lines change the active statement terminator and are stripped from the
 * output — they are a mysql-client concept, not server SQL.
 *
 * @param sql raw contents of a `.sql` file
 * @returns trimmed, non-empty statements without their trailing delimiter
 */
export function splitMysqlStatements(sql: string): string[] {
    const statements: string[] = [];
    const len = sql.length;
    let pending = '';
    let terminator = ';';
    let pos = 0;

    // Parser mode; string/comment modes persist across newlines.
    type Mode =
        | 'normal'
        | 'sq' // inside '...'
        | 'dq' // inside "..."
        | 'bt' // inside `...`
        | 'block'; // inside /* ... */
    let mode: Mode = 'normal';

    // Push the trimmed buffer (if non-empty) as one finished statement.
    const emit = (): void => {
        const text = pending.trim();
        if (text.length > 0) statements.push(text);
        pending = '';
    };

    while (pos < len) {
        // DELIMITER directives are only honoured at the start of a line,
        // outside any string/comment, and only between statements (the
        // mysql CLI is laxer, but real dumps never do otherwise).
        if (mode === 'normal' && (pos === 0 || sql[pos - 1] === '\n')) {
            let eol = sql.indexOf('\n', pos);
            if (eol === -1) eol = len;
            const directive = /^\s*DELIMITER\s+(\S+)\s*$/i.exec(
                sql.slice(pos, eol),
            );
            if (directive && pending.trim() === '') {
                terminator = directive[1];
                pending = '';
                pos = eol + 1; // skip the whole line, newline included
                continue;
            }
        }

        const ch = sql[pos];
        const peek = pos + 1 < len ? sql[pos + 1] : '';

        if (mode === 'sq' || mode === 'dq') {
            const quote = mode === 'sq' ? "'" : '"';
            pending += ch;
            if (ch === '\\' && pos + 1 < len) {
                // Backslash escape: copy the escaped char verbatim.
                pending += sql[pos + 1];
                pos += 2;
                continue;
            }
            if (ch === quote) {
                if (peek === quote) {
                    // Doubled quote is the SQL-style escape.
                    pending += quote;
                    pos += 2;
                    continue;
                }
                mode = 'normal';
            }
            pos++;
            continue;
        }

        if (mode === 'bt') {
            // Backtick identifiers have no backslash escape, only ``.
            pending += ch;
            if (ch === '`') {
                if (peek === '`') {
                    pending += '`';
                    pos += 2;
                    continue;
                }
                mode = 'normal';
            }
            pos++;
            continue;
        }

        if (mode === 'block') {
            pending += ch;
            if (ch === '*' && peek === '/') {
                pending += '/';
                pos += 2;
                mode = 'normal';
                continue;
            }
            pos++;
            continue;
        }

        // mode === 'normal' from here on.
        // Line comments: MySQL requires whitespace or EOL after `--`;
        // `#` starts a comment anywhere. Comment text is kept in the
        // buffer so statement text stays faithful (the server tolerates
        // embedded comments).
        const dashComment =
            ch === '-' &&
            peek === '-' &&
            (sql[pos + 2] === undefined || /\s/.test(sql[pos + 2]));
        if (dashComment || ch === '#') {
            const eol = sql.indexOf('\n', pos);
            const stop = eol === -1 ? len : eol;
            pending += sql.slice(pos, stop);
            pos = stop;
            continue;
        }

        if (ch === '/' && peek === '*') {
            pending += '/*';
            pos += 2;
            mode = 'block';
            continue;
        }

        if (ch === "'" || ch === '"' || ch === '`') {
            pending += ch;
            mode = ch === "'" ? 'sq' : ch === '"' ? 'dq' : 'bt';
            pos++;
            continue;
        }

        // Active statement terminator (default `;`, possibly multi-char).
        if (sql.startsWith(terminator, pos)) {
            emit();
            pos += terminator.length;
            continue;
        }

        pending += ch;
        pos++;
    }

    // The last statement may legitimately lack a terminator.
    emit();
    return statements;
}
+5 -2
View File
@@ -401,9 +401,12 @@ export class DDBClient extends PuterClient {
params: CreateTableCommandInput,
ttlAttribute?: string,
) {
if (this.#ddbConfig.aws) {
// Real-AWS deployments provision tables externally (Terraform / IaC),
// so we no-op there by default. Self-hosters pointing at
// dynamodb-local opt in via `dynamo.bootstrapTables: true`.
if (this.#ddbConfig.aws && !this.#ddbConfig.bootstrapTables) {
console.warn(
'Creating DynamoDB tables in AWS is disabled by default, but if needed, update DDBClient',
'Creating DynamoDB tables is disabled by default; set `dynamo.bootstrapTables: true` in config to enable (intended for local emulators).',
);
return;
}
+6 -1
View File
@@ -78,6 +78,11 @@ const buildCluster = (config: IConfig): Cluster => {
]) as unknown as Cluster;
}
// TLS defaults on (matches the existing prod ElastiCache behavior).
// Self-hosters running cluster mode against a plain-TCP Valkey set
// `redis.tls: false` to opt out.
const tlsEnabled = redisConfig.tls !== false;
const cluster = new Redis.Cluster(
startupNodes as ConstructorParameters<typeof Redis.Cluster>[0],
{
@@ -90,7 +95,7 @@ const buildCluster = (config: IConfig): Cluster => {
slotsRefreshTimeout: redisSlotsRefreshTimeoutMs,
enableOfflineQueue: true,
redisOptions: {
tls: {},
...(tlsEnabled ? { tls: {} } : {}),
connectTimeout: redisConnectTimeoutMs,
maxRetriesPerRequest: 1,
},
+68 -5
View File
@@ -869,6 +869,50 @@ export class AppDriver extends PuterDriver {
return !!this.#extractPuterHostedSubdomain(indexUrl);
}
/**
* Read normalized origin-alias groups from config. Each group is a deduped
* list of lowercased, trimmed bare hosts. Malformed entries are skipped so
* a bad config row doesn't brick app create/update for everyone else.
*/
#getOriginAliasGroups() {
const config = this.config ?? {};
const raw = config.app_origin_aliases;
if (!Array.isArray(raw)) return [];
const groups = [];
for (const group of raw) {
if (!Array.isArray(group)) continue;
const normalized = [
...new Set(
group
.filter((h) => typeof h === 'string')
.map((h) => h.trim().toLowerCase())
.filter((h) => h.length > 0),
),
];
if (normalized.length > 0) groups.push(normalized);
}
return groups;
}
/**
* Return the alias group containing this index_url's host, or null when
* the host isn't claimed by any group.
*/
#findOriginAliasGroupForIndexUrl(indexUrl) {
if (typeof indexUrl !== 'string' || !indexUrl) return null;
let hostname;
try {
hostname = new URL(indexUrl).hostname.toLowerCase();
} catch {
return null;
}
for (const group of this.#getOriginAliasGroups()) {
if (group.includes(hostname)) return group;
}
return null;
}
/**
* Generate the set of equivalent index_url strings that should
* collide with a given input. We only collapse trailing-slash and
@@ -906,13 +950,32 @@ export class AppDriver extends PuterDriver {
}
async #findIndexUrlConflictRow({ indexUrl, excludeAppId } = {}) {
if (!this.#isPuterHostedIndexUrl(indexUrl)) return null;
const aliasGroup = this.#findOriginAliasGroupForIndexUrl(indexUrl);
if (!this.#isPuterHostedIndexUrl(indexUrl) && !aliasGroup) return null;
const candidates = this.#buildEquivalentIndexUrlCandidates(indexUrl);
if (candidates.length === 0) return null;
if (hasIndexUrlUniquenessExemption(candidates)) return null;
const candidates = new Set(
this.#buildEquivalentIndexUrlCandidates(indexUrl),
);
return this.appStore.findByIndexUrlCandidates(candidates, {
// For alias-group hosts, treat the group as a host-level reservation:
// any row whose index_url is the root URL of any group member counts
// as a conflict, so a single app owns the whole group.
if (aliasGroup) {
for (const host of aliasGroup) {
for (const proto of ['https', 'http']) {
const base = `${proto}://${host}`;
candidates.add(base);
candidates.add(`${base}/`);
candidates.add(`${base}/index.html`);
}
}
}
if (candidates.size === 0) return null;
const candidateList = [...candidates];
if (hasIndexUrlUniquenessExemption(candidateList)) return null;
return this.appStore.findByIndexUrlCandidates(candidateList, {
excludeAppId,
});
}
+83 -1
View File
@@ -230,7 +230,11 @@ export class AuthService extends PuterService {
legacyCode: 'no_origin_for_app',
});
}
const event = { origin: parsed };
// Aliased hosts collapse to a single canonical representative so the
// event listeners and the UUIDv5 fallback resolve to the same value
// for every member of an alias group.
const aliased = this.#canonicalizeAliasedOrigin(parsed) ?? parsed;
const event = { origin: aliased };
await this.clients.event?.emitAndWait('app.from-origin', event, {});
const canonicalUid = await this.#findCanonicalAppUidForOrigin(
@@ -242,6 +246,74 @@ export class AuthService extends PuterService {
return `app-${uid}`;
}
/**
 * Read `app_origin_aliases` from config and return normalized groups —
 * each group is a deduped list of lowercased, trimmed host strings.
 * Malformed entries are skipped silently so a bad config row doesn't
 * brick UID resolution for everyone else.
 */
#getOriginAliasGroups(): string[][] {
    const raw = (this.config as { app_origin_aliases?: unknown })
        .app_origin_aliases;
    if (!Array.isArray(raw)) return [];
    const out: string[][] = [];
    for (const entry of raw) {
        // Non-array rows are ignored rather than thrown on.
        if (!Array.isArray(entry)) continue;
        const hosts = new Set<string>();
        for (const candidate of entry) {
            if (typeof candidate !== 'string') continue;
            const cleaned = candidate.trim().toLowerCase();
            if (cleaned.length > 0) hosts.add(cleaned);
        }
        if (hosts.size > 0) out.push([...hosts]);
    }
    return out;
}
/**
 * Find the alias group containing `host` (case-insensitive). Returns the
 * normalized group, or null when no group claims this host.
 */
#findOriginAliasGroup(host: string): string[] | null {
    const needle = host.trim().toLowerCase();
    if (needle === '') return null;
    const match = this.#getOriginAliasGroups().find((group) =>
        group.includes(needle),
    );
    return match ?? null;
}
/**
 * If the origin's host belongs to an alias group, swap it for the group's
 * canonical representative (alphabetically first member chosen for
 * order-independence so config reordering doesn't shift UUIDs). Returns
 * null when the host isn't in any group, so the caller keeps the original.
 */
#canonicalizeAliasedOrigin(origin: string): string | null {
    let url: URL;
    try {
        url = new URL(origin);
    } catch {
        return null;
    }
    // `host` may carry a port while `hostname` never does; alias groups
    // are written as bare hosts, so look up both forms.
    const withPort = url.host.toLowerCase();
    const bare = url.hostname.toLowerCase();
    const group =
        this.#findOriginAliasGroup(withPort) ??
        this.#findOriginAliasGroup(bare);
    if (group === null) return null;
    const canonical = [...group].sort()[0];
    // Already canonical (or empty group) — signal "no change" to caller.
    if (!canonical || canonical === withPort || canonical === bare) {
        return null;
    }
    url.host = canonical;
    return url.toString();
}
/**
* Find the real app row whose `index_url` canonically matches `origin`.
*
@@ -323,6 +395,16 @@ export class AuthService extends PuterService {
hostCandidates.add(`${subdomain}.${d}`);
}
}
// Origin alias group expansion: every host listed alongside the
// request's host in `app_origin_aliases` becomes a lookup candidate,
// so any one of the group's hosts being registered as an `index_url`
// resolves the whole group to that row's UID.
const aliasGroup =
this.#findOriginAliasGroup(hostRaw) ??
this.#findOriginAliasGroup(hostStripped);
if (aliasGroup) {
for (const h of aliasGroup) hostCandidates.add(h);
}
const protocolCandidates = new Set<string>([
parsed.protocol.replace(/:$/, ''),
+5 -2
View File
@@ -203,9 +203,12 @@ export class SystemKVStore extends PuterStore {
override async onServerStart(): Promise<void> {
// For local/dynalite runs we need to create the table up front.
// For real AWS we assume the table already exists.
// Real AWS deployments provision tables externally (Terraform), so
// we skip — unless the operator explicitly opts in via
// `dynamo.bootstrapTables` (e.g. self-hosting against
// dynamodb-local in docker-compose).
const ddbConfig = this.config.dynamo ?? {};
if (ddbConfig.aws) return;
if (ddbConfig.aws && !ddbConfig.bootstrapTables) return;
this.initialized = this.clients.dynamo.createTableIfNotExists(
{ ...PUTER_KV_STORE_TABLE_DEFINITION, TableName: this.tableName },
+37
View File
@@ -29,6 +29,13 @@ export interface IDynamoConfig {
aws?: IAWSCredentials;
endpoint?: string;
path?: string;
/**
* Create required tables on startup if they don't exist. Off by
* default because real-AWS deployments provision tables externally
* (Terraform / IaC). Set to `true` when pointing at a local
* DynamoDB emulator so self-hosters don't have to bootstrap by hand.
*/
bootstrapTables?: boolean;
}
export interface IRedisConfig {
@@ -36,6 +43,11 @@ export interface IRedisConfig {
host: string;
port: number;
}>;
/**
* Use TLS for cluster connections. Defaults to `true` (matches prod
* ElastiCache). Set `false` for self-host plain-TCP Valkey/Redis.
*/
tls?: boolean;
useMock?: boolean;
}
@@ -265,6 +277,14 @@ export interface IDatabaseConfig {
password?: string;
database?: string;
};
/**
* Ordered list of directories whose `.sql` files are run sequentially at
* server start (mysql engine only). Files within a directory are sorted
* lexically; directories are processed in array order. Files MUST be
* idempotent — there is no per-file applied-state tracking.
* Relative paths resolve from `process.cwd()`.
*/
migrationPaths?: string[];
}
/**
@@ -349,6 +369,23 @@ interface IConfigOptional {
private_app_hosting_domain: string;
/** Alt private app hosting domain. */
private_app_hosting_domain_alt: string;
/**
* Groups of equivalent app index_url hosts. Each group lists hosts that
* should resolve to the same canonical app: `appUidFromOrigin` looks up
* any DB row whose `index_url` is one of the group's hosts and returns
* that row's UID for every host in the group.
*
* Hosts listed here are also reserved — `apps.create` / `apps.update`
* reject any attempt to register a different app under one of these
* hosts, so the group is owned by exactly one app row.
*
* Entries are bare hosts (no scheme), lowercased. Example:
* [
* ["camera.puter.com", "camera.puter.site", "camera.ca"],
* ["player.puter.com", "player.puter.site"],
* ]
*/
app_origin_aliases?: string[][];
/** When true, accept any Host header value. Dev/testing only. */
allow_all_host_values: boolean;
/** When true, accept requests without a Host header. */
+59
View File
@@ -0,0 +1,59 @@
#!/usr/bin/env node
// Install dependencies for every subfolder under ./extensions/.
// Runs installs in parallel; uses `npm ci` when a lockfile is present,
// otherwise falls back to `npm install`. Cross-platform replacement for
// the previous bash version.
import { existsSync, readdirSync, statSync } from 'node:fs';
import { spawn } from 'node:child_process';
import { join } from 'node:path';

const EXT_DIR = './extensions';

// Nothing to do when the extensions directory doesn't exist.
if (!existsSync(EXT_DIR)) {
    process.exit(0);
}

// Only direct subdirectories that look like npm packages are considered.
const dirs = readdirSync(EXT_DIR)
    .map((name) => join(EXT_DIR, name))
    .filter((p) => statSync(p).isDirectory())
    .filter((p) => existsSync(join(p, 'package.json')));

if (dirs.length === 0) {
    process.exit(0);
}

// On Windows `npm` is a .cmd shim; Node >= 18.20 / 20.12 refuses to spawn
// .bat/.cmd files without a shell (CVE-2024-27980 hardening), so spawning
// `npm.cmd` directly throws EINVAL there. Run through the shell instead.
const isWindows = process.platform === 'win32';

/**
 * Run `npm ci` (lockfile present) or `npm install` in `dir`.
 * Output is buffered and flushed in one write so parallel installs don't
 * interleave their logs. Resolves on exit code 0, rejects otherwise.
 */
function install(dir) {
    return new Promise((resolve, reject) => {
        const args = existsSync(join(dir, 'package-lock.json')) ? ['ci'] : ['install'];
        console.log(`[${dir}] starting npm ${args.join(' ')}`);
        const child = spawn('npm', args, { cwd: dir, shell: isWindows });
        let out = '';
        child.stdout.on('data', (d) => (out += d));
        child.stderr.on('data', (d) => (out += d));
        child.on('error', reject);
        child.on('close', (code) => {
            if (out) process.stdout.write(out);
            if (code === 0) {
                console.log(`[${dir}] done`);
                resolve();
            } else {
                reject(new Error(`[${dir}] npm ${args.join(' ')} exited with code ${code}`));
            }
        });
    });
}

const results = await Promise.allSettled(dirs.map(install));

const failures = results
    .map((result, i) => ({ result, dir: dirs[i] }))
    .filter(({ result }) => result.status === 'rejected');

if (failures.length > 0) {
    for (const { result, dir } of failures) {
        console.error(`[${dir}] ${result.reason?.message ?? result.reason}`);
    }
    process.exit(1);
}
-8
View File
@@ -1,8 +0,0 @@
#~!/bin/bash
# iterate through each folder in extensions/ if they contain a package.json, run npm install
for d in ./extensions/*/ ; do
if [ -f "$d/package.json" ]; then
echo "Installing dependencies for $d"
(cd "$d" && npm install)
fi
done