Publish CapRover Image
Some checks failed
Run build / build (push) Has been cancelled
Run formatter / check-code-formatting (push) Has been cancelled
Run lint / run-lint (push) Has been cancelled
Build and push the edge image / run-pre-checks (push) Has been cancelled
Build and push the edge image / build-publish-docker-hub (push) Has been cancelled

This commit is contained in:
Kasra Bigdeli
2024-12-21 22:59:07 -08:00
parent 3f7e6093b0
commit abea71983d
5 changed files with 1 additions and 153 deletions

View File

@@ -1,17 +0,0 @@
# GoAccess web-log analyzer image used by CapRover to build per-app NGINX reports.
FROM allinurl/goaccess:1.9.3
# Confirm the busybox cron daemon exists (fails the build early if not), then
# remove the stock /etc/periodic jobs so only the scripts copied below run.
RUN which crond && \
rm -rf /etc/periodic
# entrypoint.sh snapshots the env for cron jobs and execs the CMD;
# catchupLog.sh regenerates an incremental "Live" report per app/domain;
# processLogs.sh rotates the shared logs into timestamped reports and archives them.
COPY ./dockerfiles/goaccess-files/entrypoint.sh /entrypoint.sh
COPY ./dockerfiles/goaccess-files/catchupLog.sh /catchupLog.sh
COPY ./dockerfiles/goaccess-files/processLogs.sh /processLogs.sh
ENTRYPOINT ["/entrypoint.sh"]
# https://blog.thesparktree.com/cron-in-docker
# source: `docker run --rm -it alpine crond -h`
# -f | Foreground
# -l N | Set log level. Most verbose 0, default 8
CMD ["crond", "-f", "-l", "2"]

View File

@@ -1,32 +0,0 @@
#!/bin/sh
# Incremental "live" report generator — a simpler sibling of processLogs.sh.
#
# Loops over the shared NGINX log files whose names start with $FILE_PREFIX
# (limiting the run to a single app/domain) and regenerates a GoAccess report
# with a stable name, so each run overwrites the previous "Live" report.
# The --restore and --persist flags make GoAccess keep an on-disk db per log
# file so it only parses new lines instead of the whole log each time.
#
# Env:
#   FILE_PREFIX - log-file name prefix selecting which app/domain to process
echo "Starting Catchup for $FILE_PREFIX"
for logFile in /var/log/nginx-shared/"$FILE_PREFIX"*access.log; do
    # An unmatched glob yields the literal pattern in POSIX sh; skip it.
    [ -e "$logFile" ] || continue
    filename=$(basename "$logFile")
    # Everything before the first "--" in the file name is the app name.
    appName=${filename%%--*}
    appPath="/var/log/nginx-shared/$appName"
    dbPath="$appPath/$filename-db"
    # Directory for all of the app's reports, plus the GoAccess persistence db.
    mkdir -p "$appPath" "$dbPath"
    report="$appPath/$filename--Live.html"
    echo "Processing catchup $report"
    goaccess "$logFile" -a -o "$report" --log-format=COMBINED --restore --persist --db-path "$dbPath"
done

View File

@@ -1,7 +0,0 @@
#!/bin/sh
# Container entrypoint: snapshot the container's environment into
# /etc/environment so cron jobs (which start with a minimal environment)
# can read the variables set on the container, then run the CMD.
env >> /etc/environment
# execute CMD
# Log the command for visibility, then replace this shell with it via exec
# so the CMD becomes the container's foreground process and receives signals.
echo "$@"
exec "$@"

View File

@@ -1,96 +0,0 @@
#!/bin/sh
# Rotate the shared NGINX access logs into timestamped GoAccess HTML reports.
#
# For every non-empty *.log in the shared volume (one per app/domain combo):
#   - create the app's report directory if it doesn't exist
#   - copy the live log to a temporary rotated file, then truncate the original
#     (copy-then-truncate avoids sending "kill -USR1 `cat /var/run/nginx.pid`"
#     to NGINX to pick up a fresh log file — this container has no access to
#     the NGINX process)
#   - remove the catchup GoAccess db cache, which is stale after the reset
#   - build the report from the rotated copy
#   - gzip the rotated copy so it is archived and skipped by future runs
#
# Afterwards, if $LOG_RETENTION_DAYS > 0, delete archived .gz logs and .html
# reports whose modification time is older than the retention window.
#
# Env:
#   LOG_RETENTION_DAYS - days to keep archives/reports; unset/0 keeps forever
currentDateTime=$(date +"%Y-%m-%dT%H:%M")
# Default an unset/empty retention to 0 ("keep forever") so the integer
# comparison below doesn't error with "integer expression expected".
: "${LOG_RETENTION_DAYS:=0}"
echo "Checking logs to process"
for logFile in /var/log/nginx-shared/*.log; do
    # -s is false both for empty files and for the literal unmatched glob.
    if [ -s "$logFile" ]; then
        filename=$(basename "$logFile")
        # Everything before the first "--" in the file name is the app name.
        appName=${filename%%--*}
        appPath="/var/log/nginx-shared/$appName"
        dbPath="$appPath/$filename-db"
        # Make directory for all the reports to live in
        mkdir -p "$appPath"
        rotatedLog="$logFile--$currentDateTime.log"
        report="$appPath/$filename--$currentDateTime.html"
        if [ -f "$report" ]; then
            echo "$report already exists, skipping"
        else
            echo "Processing $report"
            # Manually rotate the log files
            cp "$logFile" "$rotatedLog"
            truncate -s 0 "$logFile"
            # Remove the GoAccess cache db files for the catchup processing
            # now that the log is reset
            rm -rf "$dbPath"
            goaccess "$rotatedLog" -a -o "$report" --log-format=COMBINED
            gzip "$rotatedLog"
        fi
    fi
done
# Loop through the gzipped log files and delete ones past the log retention time
if [ "$LOG_RETENTION_DAYS" -gt 0 ]; then
    echo "Checking log retention"
    currentTimestamp=$(date +%s)
    # Cutoff is loop-invariant: files modified before it are past retention.
    retentionTimestamp=$((currentTimestamp - LOG_RETENTION_DAYS * 24 * 60 * 60))
    for tarFile in /var/log/nginx-shared/*.gz; do
        if [ -f "$tarFile" ]; then
            # Modification time in seconds since the epoch
            fileTimestamp=$(stat -c %Y "$tarFile")
            if [ "$fileTimestamp" -lt "$retentionTimestamp" ]; then
                echo "$tarFile past retention, deleting"
                rm "$tarFile"
            fi
        fi
    done
    # Now remove the reports that are past retention time
    for folder in /var/log/nginx-shared/*; do
        if [ -d "$folder" ]; then
            for htmlFile in "$folder"/*.html; do
                if [ -f "$htmlFile" ]; then
                    fileTimestamp=$(stat -c %Y "$htmlFile")
                    if [ "$fileTimestamp" -lt "$retentionTimestamp" ]; then
                        echo "$htmlFile past retention, deleting"
                        rm "$htmlFile"
                    fi
                fi
            done
        fi
    done
fi
echo "Done"

View File

@@ -37,7 +37,7 @@ const configs = {
netDataImageName: 'caprover/netdata:v1.34.1',
goAccessImageName: 'dshook/goaccess',
goAccessImageName: 'caprover/goaccess:1.9.3',
registryImageName: 'registry:2',