Skip to content

Instantly share code, notes, and snippets.

@regiellis
Last active November 4, 2024 11:31
Show Gist options
  • Save regiellis/92c6b32b2cd622ed0e36235d7b138544 to your computer and use it in GitHub Desktop.
InvokeAI scripts for service and hot reload on Ubuntu
#!/usr/bin/env zsh
# Hot-reload watcher: restarts InvokeAI whenever node sources change.
# Get the main directory (zsh-only: :A resolves symlinks, :h takes dirname)
SCRIPT_DIR="${0:A:h}"
# Set paths relative
WATCH_DIR="$SCRIPT_DIR/nodes/" # Adjust this path as needed
RUN_INVOKEAI_SCRIPT="$SCRIPT_DIR/invoke-web.sh"
PID_FILE="$SCRIPT_DIR/invokeai.pid"
LOG_FILE="$SCRIPT_DIR/invokeai.log"
DEBOUNCE_SECONDS=5
# Define colors (ANSI escapes for terminal output via echo -e)
GREEN='\033[0;32m' # Green for success messages
RED='\033[0;31m' # Red for error messages
YELLOW='\033[0;33m' # Yellow for warnings
NC='\033[0m' # No Color
is_invokeai_running() {
  # Succeed (exit 0) when a process whose command line matches
  # "invokeai-web" exists; pgrep's output itself is discarded.
  if pgrep -f "invokeai-web" > /dev/null; then
    return 0
  fi
  return 1
}
start_invokeai() {
  # Bring InvokeAI up through the control script, then pause briefly
  # so the web process has a chance to initialize.
  printf '%b\n' "${GREEN}Starting InvokeAI...${NC}"
  "$RUN_INVOKEAI_SCRIPT" start
  sleep 2 # grace period for startup
}
stop_invokeai() {
  # Ask the control script to stop InvokeAI, but only when it is running.
  if ! is_invokeai_running; then
    echo -e "${YELLOW}InvokeAI is not running.${NC}"
    return
  fi
  echo -e "${YELLOW}Stopping InvokeAI...${NC}"
  "$RUN_INVOKEAI_SCRIPT" stop
  sleep 2 # grace period for shutdown
}
restart_invokeai() {
# Full bounce: stop (if running) then start; each step sleeps ~2s.
stop_invokeai
start_invokeai
}
watch_for_changes() {
  # Poll WATCH_DIR once a second for recently modified source files and
  # restart InvokeAI when something changed. Restarts are rate-limited so
  # a burst of writes triggers at most one restart per DEBOUNCE_SECONDS.
  # Never returns; the script exits via the SIGINT/SIGTERM trap.
  local last_restart current_time  # fix: current_time was an implicit global
  last_restart=$(date +%s)
  while true; do
    # find prints files changed within the debounce window;
    # `grep -q .` tests whether it printed anything at all.
    if find "$WATCH_DIR" -type f \( -name "*.py" -o -name "*.js" -o -name "*.css" \) -newermt "-${DEBOUNCE_SECONDS} seconds" | grep -q .; then
      current_time=$(date +%s)
      if ((current_time - last_restart >= DEBOUNCE_SECONDS)); then
        echo -e "${YELLOW}Changes detected. Restarting InvokeAI...${NC}"
        restart_invokeai
        last_restart=$current_time
      fi
    fi
    sleep 1
  done
}
tail_log() {
  # Follow the service log in the background; remember the tail PID in a
  # global so cleanup() can terminate it on exit.
  tail -F "$LOG_FILE" &
  TAIL_PID=$!
}
cleanup() {
  # Signal handler (SIGINT/SIGTERM): stop the background tail if one was
  # started, shut InvokeAI down, and exit cleanly.
  echo -e "${YELLOW}Cleaning up...${NC}"
  # fix: only kill when TAIL_PID is set — the old unquoted bare `kill $TAIL_PID`
  # invoked `kill` with no arguments if tail_log had not run yet
  if [[ -n "$TAIL_PID" ]]; then
    kill "$TAIL_PID" 2>/dev/null
  fi
  stop_invokeai
  exit 0
}
# Set up trap to handle script termination
# (Ctrl+C or a TERM signal runs cleanup, which stops the tail and InvokeAI)
trap cleanup SIGINT SIGTERM
# Start InvokeAI if it's not already running
if ! is_invokeai_running; then
start_invokeai
fi
# Start tailing the log file
# (runs in the background; its PID is saved in TAIL_PID for cleanup)
tail_log
echo -e "${GREEN}Watching $WATCH_DIR for changes...${NC}"
echo -e "${GREEN}Tailing $LOG_FILE...${NC}"
echo -e "${YELLOW}Press Ctrl+C to stop watching and tailing.${NC}"
# Start watching for changes
# (blocks forever; the trap above is the only exit path)
watch_for_changes
#!/usr/bin/env zsh
# invoke-web.sh — start/stop/restart controller for an InvokeAI install.
# MIT License
# Authored by Regi Ellis <[email protected] || [email protected]>
# Copyright 2024, Playlogic IO, LLC All rights reserved.
# zsh-only: :A resolves symlinks, :h takes the dirname
SCRIPT_DIR="${0:A:h}"
# The script is expected to live inside the InvokeAI root directory
INVOKEAI_DIR="$SCRIPT_DIR"
VENV_PATH="$INVOKEAI_DIR/.venv"
PID_FILE="$SCRIPT_DIR/invokeai.pid"
LOG_FILE="$SCRIPT_DIR/invokeai.log"
MAX_WAIT_TIME=60 # Maximum wait time in seconds
NVIDIA_LIMITS_SCRIPT="$HOME/set_nvidia_limits.sh"
# Custom GPU undervolting script
set_nvidia_limits() {
  # Apply GPU power/clock limits by sourcing the user's helper script.
  # Missing script is non-fatal: report and continue.
  if [[ ! -f "$NVIDIA_LIMITS_SCRIPT" ]]; then
    echo "NVIDIA limits script not found at $NVIDIA_LIMITS_SCRIPT. Skipping GPU limit setting."
    return
  fi
  echo "Setting NVIDIA GPU limits..."
  source "$NVIDIA_LIMITS_SCRIPT"
}
check_service_status() {
  # Report how InvokeAI is currently being run, on stdout:
  #   "system"      - managed by systemd (invoke.service is active)
  #   "user"        - background user process (PID file present)
  #   "not_running" - neither
  local status="not_running"
  if systemctl is-active --quiet invoke.service; then
    status="system"
  elif [[ -f "$PID_FILE" ]]; then
    status="user"
  fi
  echo "$status"
}
start_service() {
  # systemd entry point: activate the venv, point INVOKEAI_ROOT at the
  # install dir, and replace this shell with the server process so the
  # unit supervises invokeai-web directly.
  echo "Starting InvokeAI as a service..."
  export INVOKEAI_ROOT="$INVOKEAI_DIR"
  source "$VENV_PATH/bin/activate"
  exec invokeai-web
}
start_background() {
  # Detach invokeai-web from the terminal, capture all its output in the
  # log file, and record its PID for later stop/status checks. Blocks
  # until the server reports ready (or wait_for_invokeai_ready times out).
  echo "Starting InvokeAI in the background..."
  export INVOKEAI_ROOT="$INVOKEAI_DIR"
  source "$VENV_PATH/bin/activate"
  nohup invokeai-web > "$LOG_FILE" 2>&1 &
  local bg_pid=$!
  echo "$bg_pid" > "$PID_FILE"
  chmod 644 "$PID_FILE"
  echo "InvokeAI started in background. PID: $bg_pid"
  wait_for_invokeai_ready
}
start_foreground() {
  # Run invokeai-web attached to the current terminal; blocks until it exits.
  echo "Starting InvokeAI in the foreground..."
  export INVOKEAI_ROOT="$INVOKEAI_DIR"
  source "$VENV_PATH/bin/activate"
  invokeai-web
}
start() {
  # Idempotent start: only launch a new background instance when nothing
  # (neither the systemd service nor a user process) is already running.
  local service_status
  service_status=$(check_service_status)
  if [[ "$service_status" == "system" ]]; then
    echo "InvokeAI is already running as a system service."
  elif [[ "$service_status" == "user" ]]; then
    echo "InvokeAI is already running as a user process."
  elif [[ "$service_status" == "not_running" ]]; then
    echo "Starting InvokeAI..."
    start_background
  fi
}
stop() {
  # Stop InvokeAI according to how it was started. A systemd-managed
  # instance is left alone (needs sudo); a user process is signalled and
  # waited on; a stale PID file (process already gone) is cleaned up
  # without the spurious `kill` error the old code produced.
  local service_status
  service_status=$(check_service_status)
  case "$service_status" in
    system)
      echo "InvokeAI is running as a system service. Use 'sudo systemctl stop invoke.service' to stop it."
      ;;
    user)
      if [[ -f "$PID_FILE" ]]; then
        PID=$(cat "$PID_FILE")  # intentionally global: read by wait_for_invokeai_stop
        # kill -0 probes for existence without sending a signal
        if kill -0 "$PID" 2>/dev/null; then
          echo "Stopping InvokeAI user process (PID: $PID)..."
          kill "$PID"
          wait_for_invokeai_stop
        else
          echo "Stale PID file (process $PID not running); cleaning up."
        fi
        rm -f "$PID_FILE"
      else
        echo "PID file not found. InvokeAI may not be running as a user process."
      fi
      ;;
    not_running)
      echo "InvokeAI is not running."
      ;;
  esac
}
restart() {
# Bounce the instance: stop (or report why not), pause, start again.
stop
sleep 2 # Wait for a moment before starting again
start
}
wait_for_invokeai_ready() {
  # Block until the web server writes its startup banner to LOG_FILE,
  # polling once a second. Returns 0 when ready, 1 after MAX_WAIT_TIME
  # seconds without seeing the banner.
  echo "Waiting for InvokeAI to be fully operational..."
  local start_time current_time elapsed_time
  start_time=$(date +%s)
  while true; do
    # 2>/dev/null: the log file may not exist for the first moments after
    # launch; the old code printed a grep error once a second until it did
    if grep -q "Application startup complete" "$LOG_FILE" 2>/dev/null; then
      echo "InvokeAI is now fully operational."
      return 0
    fi
    current_time=$(date +%s)
    elapsed_time=$((current_time - start_time))
    if [[ $elapsed_time -ge $MAX_WAIT_TIME ]]; then
      echo "Timeout: InvokeAI did not start within $MAX_WAIT_TIME seconds."
      return 1
    fi
    sleep 1
  done
}
wait_for_invokeai_stop() {
  # Wait for a process to exit after it was signalled, escalating to
  # SIGKILL if it is still alive after MAX_WAIT_TIME seconds.
  # $1 - PID to wait on (optional; defaults to the global $PID set by
  #      stop(), preserving the original calling convention)
  local pid="${1:-$PID}"
  echo "Waiting for InvokeAI to stop..."
  local start_time current_time elapsed_time
  start_time=$(date +%s)
  # kill -0 probes for existence without sending a signal
  while kill -0 "$pid" 2>/dev/null; do
    current_time=$(date +%s)
    elapsed_time=$((current_time - start_time))
    if [[ $elapsed_time -ge $MAX_WAIT_TIME ]]; then
      echo "Timeout: InvokeAI did not stop within $MAX_WAIT_TIME seconds. Forcing stop."
      kill -9 "$pid"
      break
    fi
    sleep 1
  done
}
# Set NVIDIA limits before starting (uncomment if needed)
#set_nvidia_limits
# Set macOS-specific environment variable
# (lets PyTorch fall back to CPU for ops the MPS backend lacks)
if [[ "$(uname)" == "Darwin" ]]; then
export PYTORCH_ENABLE_MPS_FALLBACK=1
fi
# Command dispatch:
#   start      - start in background unless something is already running
#   background - force a background start (no running check)
#   stop       - stop a user-process instance (or explain how for systemd)
#   restart    - stop, pause, start
#   service    - foreground exec; used as the systemd unit's ExecStart
case "$1" in
start)
start
;;
background)
start_background
;;
stop)
stop
;;
restart)
restart
;;
service)
start_service
;;
*)
echo "Usage: $0 {start|background|stop|restart|service}"
exit 1
;;
esac
# systemd unit for InvokeAI — install as /etc/systemd/system/invoke.service,
# then: sudo systemctl daemon-reload && sudo systemctl enable --now invoke.service
[Unit]
Description=Invoke Web Service
# Start only after basic networking is up
After=network.target
[Service]
# 'simple': the ExecStart process is the service itself
# (invoke-web.sh's "service" mode exec's invokeai-web in the foreground)
Type=simple
User=playlogic
ExecStart=/mnt/Hub/AI/INVOKEAI/invoke-web.sh service
# Auto-restart on crash, with a 5-second backoff
Restart=on-failure
RestartSec=5
[Install]
WantedBy=multi-user.target
#!/bin/bash
# Set limits for nvidia-smi
# based on rtx 3090
# Enable persistence mode so the settings below survive between CUDA clients
sudo nvidia-smi -pm 1
# Cap board power draw at 320 W on GPU index 0
sudo nvidia-smi -i 0 -pl 320
# Lock graphics clocks to the 1650-1950 MHz range on GPU index 0
sudo nvidia-smi -i 0 -lgc 1650,1950
Sign up for free to join this conversation on GitHub. Already have an account? Sign in to comment