Add vibe-kanban

commit a37b59f29a
parent 488d7c2a76
2025-12-31 01:31:54 +07:00
41 changed files with 2004 additions and 0 deletions

scripts/copy-database-for-task.sh Executable file

@@ -0,0 +1,124 @@
#!/bin/bash
# scripts/copy-database-for-task.sh
# Copies database from main repo to task-specific PostgreSQL instance
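# Example invocation (arguments default to the values below; the task id is illustrative):
#   ./scripts/copy-database-for-task.sh TASK-123 localhost 5432 localhost 5433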
TASK_ID=$1
if [ -z "$TASK_ID" ]; then
echo "❌ Usage: $0 <TASK_ID> [SOURCE_HOST] [SOURCE_PORT] [TARGET_HOST] [TARGET_PORT]"
exit 1
fi
SOURCE_HOST=${2:-"localhost"}
SOURCE_PORT=${3:-"5432"}
TARGET_HOST=${4:-"localhost"}
TARGET_PORT=${5:-"5433"}
SOURCE_DB="managing"
# Convert to lowercase (compatible with bash 3.2+)
TARGET_DB="managing_$(echo "$TASK_ID" | tr '[:upper:]' '[:lower:]')"
ORLEANS_SOURCE_DB="orleans"
ORLEANS_TARGET_DB="orleans_$(echo "$TASK_ID" | tr '[:upper:]' '[:lower:]')"
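# Example: TASK_ID "TASK-42" yields target databases "managing_task-42" and "orleans_task-42"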
DB_USER="postgres"
DB_PASSWORD="postgres"
set -e # Exit on error
echo "📦 Copying database for task: $TASK_ID"
echo " Source: $SOURCE_HOST:$SOURCE_PORT"
echo " Target: $TARGET_HOST:$TARGET_PORT"
# Wait for target PostgreSQL to be ready
echo "⏳ Waiting for target PostgreSQL..."
for i in {1..60}; do
if PGPASSWORD=$DB_PASSWORD psql -h $TARGET_HOST -p $TARGET_PORT -U $DB_USER -d postgres -c '\q' 2>/dev/null; then
echo "✅ Target PostgreSQL is ready"
break
fi
if [ $i -eq 60 ]; then
echo "❌ Target PostgreSQL not ready after 60 attempts"
exit 1
fi
sleep 1
done
# Verify source database is accessible
echo "🔍 Verifying source database..."
if ! PGPASSWORD=$DB_PASSWORD psql -h $SOURCE_HOST -p $SOURCE_PORT -U $DB_USER -d postgres -c '\q' 2>/dev/null; then
echo "❌ Cannot connect to source database at $SOURCE_HOST:$SOURCE_PORT"
exit 1
fi
# Create target databases (drop if exists for fresh copy)
echo "🗑️ Dropping existing target databases if they exist..."
PGPASSWORD=$DB_PASSWORD psql -h $TARGET_HOST -p $TARGET_PORT -U $DB_USER -d postgres -c "DROP DATABASE IF EXISTS \"$TARGET_DB\";" 2>/dev/null || true
PGPASSWORD=$DB_PASSWORD psql -h $TARGET_HOST -p $TARGET_PORT -U $DB_USER -d postgres -c "DROP DATABASE IF EXISTS \"$ORLEANS_TARGET_DB\";" 2>/dev/null || true
echo "📝 Creating target databases..."
PGPASSWORD=$DB_PASSWORD psql -h $TARGET_HOST -p $TARGET_PORT -U $DB_USER -d postgres -c "CREATE DATABASE \"$TARGET_DB\";"
PGPASSWORD=$DB_PASSWORD psql -h $TARGET_HOST -p $TARGET_PORT -U $DB_USER -d postgres -c "CREATE DATABASE \"$ORLEANS_TARGET_DB\";"
# Create temporary dump files
TEMP_DIR=$(mktemp -d)
MANAGING_DUMP="$TEMP_DIR/managing_${TASK_ID}.dump"
ORLEANS_DUMP="$TEMP_DIR/orleans_${TASK_ID}.dump"
# Dump source databases
echo "📤 Dumping source database: $SOURCE_DB..."
# "|| true" keeps "set -e" from aborting here so the size check below can report the failure
PGPASSWORD=$DB_PASSWORD pg_dump -h $SOURCE_HOST -p $SOURCE_PORT -U $DB_USER -Fc "$SOURCE_DB" > "$MANAGING_DUMP" || true
if [ ! -s "$MANAGING_DUMP" ]; then
echo "❌ Failed to dump source database $SOURCE_DB"
rm -rf "$TEMP_DIR"
exit 1
fi
echo "📤 Dumping Orleans database: $ORLEANS_SOURCE_DB..."
PGPASSWORD=$DB_PASSWORD pg_dump -h $SOURCE_HOST -p $SOURCE_PORT -U $DB_USER -Fc "$ORLEANS_SOURCE_DB" > "$ORLEANS_DUMP" 2>/dev/null || {
echo "⚠️ Orleans database not found, skipping..."
ORLEANS_DUMP=""
}
# Restore to target databases
echo "📥 Restoring to target database: $TARGET_DB..."
# Wrapped in "if" so a restore failure is handled here instead of aborting via "set -e"
if PGPASSWORD=$DB_PASSWORD pg_restore -h $TARGET_HOST -p $TARGET_PORT -U $DB_USER -d "$TARGET_DB" --no-owner --no-acl --clean --if-exists "$MANAGING_DUMP"; then
echo "✅ Successfully restored $TARGET_DB"
else
echo "❌ Failed to restore $TARGET_DB"
rm -rf "$TEMP_DIR"
exit 1
fi
if [ -n "$ORLEANS_DUMP" ] && [ -s "$ORLEANS_DUMP" ]; then
echo "📥 Restoring Orleans database: $ORLEANS_TARGET_DB..."
PGPASSWORD=$DB_PASSWORD pg_restore -h $TARGET_HOST -p $TARGET_PORT -U $DB_USER -d "$ORLEANS_TARGET_DB" --no-owner --no-acl --clean --if-exists "$ORLEANS_DUMP"
if [ $? -eq 0 ]; then
echo "✅ Successfully restored $ORLEANS_TARGET_DB"
# Clean Orleans membership tables to avoid conflicts with old silos
echo "🧹 Cleaning Orleans membership tables (removing old silo entries)..."
PGPASSWORD=$DB_PASSWORD psql -h $TARGET_HOST -p $TARGET_PORT -U $DB_USER -d "$ORLEANS_TARGET_DB" <<EOF
-- Clear membership tables to start fresh (Orleans uses lowercase table names)
TRUNCATE TABLE IF EXISTS orleansmembershiptable CASCADE;
TRUNCATE TABLE IF EXISTS orleansmembershipversiontable CASCADE;
-- Note: We keep reminder and storage tables as they may contain application data
EOF
if [ $? -eq 0 ]; then
echo "✅ Orleans membership tables cleaned"
else
echo "⚠️ Failed to clean Orleans membership tables (tables may not exist yet, which is OK)"
fi
else
echo "⚠️ Failed to restore Orleans database (non-critical)"
fi
else
# Even if no Orleans dump, create empty database for fresh start
echo "📝 Orleans database will be created fresh by Orleans framework"
fi
# Cleanup
rm -rf "$TEMP_DIR"
echo "✅ Database copy completed successfully"
echo " Managing DB: $TARGET_DB on port $TARGET_PORT"
echo " Orleans DB: $ORLEANS_TARGET_DB on port $TARGET_PORT"

scripts/create-task-compose.sh Executable file

@@ -0,0 +1,82 @@
#!/bin/bash
# scripts/create-task-compose.sh
# Creates a task-specific Docker Compose file with all required environment variables
TASK_ID=$1
PORT_OFFSET=${2:-0}
POSTGRES_PORT=$((5432 + PORT_OFFSET))
API_PORT=$((5000 + PORT_OFFSET))
WORKER_PORT=$((5001 + PORT_OFFSET))
REDIS_PORT=$((6379 + PORT_OFFSET))
ORLEANS_SILO_PORT=$((11111 + PORT_OFFSET))
ORLEANS_GATEWAY_PORT=$((30000 + PORT_OFFSET))
# Convert to lowercase (compatible with bash 3.2+)
DB_NAME="managing_$(echo "$TASK_ID" | tr '[:upper:]' '[:lower:]')"
ORLEANS_DB_NAME="orleans_$(echo "$TASK_ID" | tr '[:upper:]' '[:lower:]')"
TASK_ID_LOWER="$(echo "$TASK_ID" | tr '[:upper:]' '[:lower:]')"
# Calculate unique task slot based on port offset (for Orleans clustering)
TASK_SLOT=$((PORT_OFFSET / 10 + 1))
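# Worked example: PORT_OFFSET=10 → PostgreSQL 5442, API 5010, Redis 6389, Orleans silo 11121, gateway 30010, TASK_SLOT=2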
SCRIPT_DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" &> /dev/null && pwd )"
PROJECT_ROOT="$(dirname "$SCRIPT_DIR")"
COMPOSE_DIR="$PROJECT_ROOT/src/Managing.Docker"
COMPOSE_FILE="$COMPOSE_DIR/docker-compose.task-${TASK_ID}.yml"
# Escape function for Docker Compose environment variables
escape_env() {
echo "$1" | sed 's/\\/\\\\/g' | sed 's/\$/\\$/g' | sed 's/"/\\"/g'
}
cat > "$COMPOSE_FILE" << EOF
name: task-${TASK_ID_LOWER}
services:
postgres-${TASK_ID}:
image: postgres:17.5
container_name: postgres-${TASK_ID}
volumes:
- postgresdata_${TASK_ID}:/var/lib/postgresql/data
ports:
- "${POSTGRES_PORT}:5432"
restart: unless-stopped
networks:
- task-${TASK_ID}-network
environment:
- POSTGRES_USER=postgres
- POSTGRES_PASSWORD=postgres
- POSTGRES_DB=postgres
redis-${TASK_ID}:
image: redis:8.0.3
container_name: redis-${TASK_ID}
ports:
- "${REDIS_PORT}:6379"
volumes:
- redis_data_${TASK_ID}:/data
networks:
- task-${TASK_ID}-network
restart: unless-stopped
environment:
- REDIS_PASSWORD=
volumes:
postgresdata_${TASK_ID}:
redis_data_${TASK_ID}:
networks:
task-${TASK_ID}-network:
driver: bridge
EOF
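# Optional sanity check of the generated file (assumes the Docker Compose v2 CLI is available):
#   docker compose -f "$COMPOSE_FILE" config >/dev/null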
echo "✅ Created $COMPOSE_FILE"
echo " PostgreSQL: localhost:$POSTGRES_PORT"
echo " Redis: localhost:$REDIS_PORT"
echo " API will run via dotnet run on port: $API_PORT"
echo " Orleans Silo: localhost:$ORLEANS_SILO_PORT"
echo " Orleans Gateway: localhost:$ORLEANS_GATEWAY_PORT"
echo " InfluxDB: Using main instance at localhost:8086"
echo " Task Slot: $TASK_SLOT"

scripts/start-api-and-workers.sh Executable file

@@ -0,0 +1,109 @@
#!/bin/bash
# scripts/start-api-and-workers.sh
# Starts API and Workers using dotnet run (not Docker)
# This script is called by start-task-docker.sh after database is ready
# IMPORTANT: This script runs from the current working directory (Vibe Kanban worktree)
TASK_ID=$1
PORT_OFFSET=${2:-0}
# Use Vibe Kanban worktree if available, otherwise use current directory
# This ensures we're running from the worktree, not the main repo
if [ -n "$VIBE_WORKTREE_ROOT" ] && [ -d "$VIBE_WORKTREE_ROOT/src/Managing.Api" ]; then
PROJECT_ROOT="$VIBE_WORKTREE_ROOT"
echo "📁 Using Vibe Kanban worktree: $PROJECT_ROOT"
else
PROJECT_ROOT="$(pwd)"
echo "📁 Using current directory: $PROJECT_ROOT"
fi
SCRIPT_DIR="$PROJECT_ROOT/scripts"
POSTGRES_PORT=$((5432 + PORT_OFFSET))
API_PORT=$((5000 + PORT_OFFSET))
REDIS_PORT=$((6379 + PORT_OFFSET))
ORLEANS_SILO_PORT=$((11111 + PORT_OFFSET))
ORLEANS_GATEWAY_PORT=$((30000 + PORT_OFFSET))
# Convert to lowercase (compatible with bash 3.2+)
DB_NAME="managing_$(echo "$TASK_ID" | tr '[:upper:]' '[:lower:]')"
ORLEANS_DB_NAME="orleans_$(echo "$TASK_ID" | tr '[:upper:]' '[:lower:]')"
# Calculate unique task slot based on port offset (for Orleans clustering)
TASK_SLOT=$((PORT_OFFSET / 10 + 1))
# PID files for process management
PID_DIR="$PROJECT_ROOT/.task-pids"
mkdir -p "$PID_DIR"
API_PID_FILE="$PID_DIR/api-${TASK_ID}.pid"
WORKERS_PID_FILE="$PID_DIR/workers-${TASK_ID}.pid"
# Set environment variables for API
export ASPNETCORE_ENVIRONMENT=Development
export ASPNETCORE_URLS="http://localhost:${API_PORT}"
export RUN_ORLEANS_GRAINS=true
export SILO_ROLE=Trading
export TASK_SLOT=${TASK_SLOT}
export PostgreSql__ConnectionString="Host=localhost;Port=${POSTGRES_PORT};Database=${DB_NAME};Username=postgres;Password=postgres"
export PostgreSql__Orleans="Host=localhost;Port=${POSTGRES_PORT};Database=${ORLEANS_DB_NAME};Username=postgres;Password=postgres"
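# Note: ASP.NET Core configuration maps "__" in environment variable names to ":",
# so PostgreSql__ConnectionString above is read by the app as PostgreSql:ConnectionString.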
export InfluxDb__Url="http://localhost:8086/"
export InfluxDb__Token="Fw2FPL2OwTzDHzSbR2Sd5xs0EKQYy00Q-hYKYAhr9cC1_q5YySONpxuf_Ck0PTjyUiF13xXmi__bu_pXH-H9zA=="
export Jwt__Secret="2ed5f490-b6c1-4cad-8824-840c911f1fe6"
export Privy__AppSecret="63Chz2z5M8TgR5qc8dznSLRAGTHTyPU4cjdQobrBF1Cx5tszZpTuFgyrRd7hZ2k6HpwDz3GEwQZzsCqHb8Z311bF"
export AdminUsers="did:privy:cm7vxs99f0007blcl8cmzv74t;did:privy:cmhp5jqs2014kl60cbunp57jh"
export AUTHORIZED_ADDRESSES="0x932167388dD9aad41149b3cA23eBD489E2E2DD78;0x84e3E147c4e94716151181F25538aBf337Eca49f;0xeaf2a9a5864e3Cc37E85dDC287Ed0c90d76b2420"
export ENABLE_COPY_TRADING_VALIDATION=false
export KAIGEN_CREDITS_ENABLED=false
export KAIGEN_SECRET_KEY="KaigenXCowchain"
export Flagsmith__ApiKey="ser.ShJJJMtWYS9fwuzd83ejwR"
export Discord__ApplicationId="966075382002516031"
export Discord__PublicKey="63028f6bb740cd5d26ae0340b582dee2075624011b28757436255fc002ca8a7c"
export Discord__TokenId="OTY2MDc1MzgyMDAyNTE2MDMx.Yl8dzw.xpeIAaMwGrwTNY4r9JYv0ebzb-U"
export N8n__WebhookUrl="https://n8n.kai.managing.live/webhook/fa9308b6-983b-42ec-b085-71599d655951"
export N8n__IndicatorRequestWebhookUrl="https://n8n.kai.managing.live/webhook/3aa07b66-1e64-46a7-8618-af300914cb11"
export N8n__Username="managing-api"
export N8n__Password="T259836*PdiV2@%!eR%Qf4"
export Sentry__Dsn="https://fe12add48c56419bbdfa86227c188e7a@glitch.kai.managing.live/1"
# Verify we're in the right directory (should have src/Managing.Api)
if [ ! -d "$PROJECT_ROOT/src/Managing.Api" ]; then
echo "❌ Error: src/Managing.Api not found in current directory: $PROJECT_ROOT"
echo "💡 Make sure you're running from the project root (or Vibe Kanban worktree)"
exit 1
fi
echo "🚀 Starting API on port $API_PORT..."
echo "📁 Running from: $PROJECT_ROOT"
cd "$PROJECT_ROOT/src/Managing.Api"
dotnet run > "$PID_DIR/api-${TASK_ID}.log" 2>&1 &
API_PID=$!
echo $API_PID > "$API_PID_FILE"
echo "✅ API started (PID: $API_PID) from worktree: $PROJECT_ROOT"
# Wait a bit for API to start
sleep 3
echo "🚀 Starting Workers..."
cd "$PROJECT_ROOT/src/Managing.Workers"
# Set workers environment variables (separate from API)
ASPNETCORE_ENVIRONMENT=Development \
PostgreSql__ConnectionString="Host=localhost;Port=${POSTGRES_PORT};Database=${DB_NAME};Username=postgres;Password=postgres" \
InfluxDb__Url="http://localhost:8086/" \
InfluxDb__Token="Fw2FPL2OwTzDHzSbR2Sd5xs0EKQYy00Q-hYKYAhr9cC1_q5YySONpxuf_Ck0PTjyUiF13xXmi__bu_pXH-H9zA==" \
KAIGEN_SECRET_KEY="KaigenXCowchain" \
Flagsmith__ApiKey="ser.ShJJJMtWYS9fwuzd83ejwR" \
dotnet run > "$PID_DIR/workers-${TASK_ID}.log" 2>&1 &
WORKERS_PID=$!
echo $WORKERS_PID > "$WORKERS_PID_FILE"
echo "✅ Workers started (PID: $WORKERS_PID) from worktree: $PROJECT_ROOT"
echo ""
echo "✅ API and Workers started!"
echo "━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━"
echo "📊 API: http://localhost:$API_PORT"
echo "📋 API PID: $API_PID"
echo "📋 Workers PID: $WORKERS_PID"
echo "📋 Logs: $PID_DIR/api-${TASK_ID}.log"
echo "📋 Logs: $PID_DIR/workers-${TASK_ID}.log"
echo "━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━"

scripts/start-dev-env.sh Executable file

@@ -0,0 +1,38 @@
#!/bin/bash
# scripts/start-dev-env.sh
# Simple wrapper for dev agent to start Docker Compose task environments
TASK_ID=${1:-"DEV-$(date +%Y%m%d-%H%M%S)"}
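# When no TASK_ID is passed, a timestamped one is generated, e.g. DEV-20251231-013154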
PORT_OFFSET=${2:-0}
echo "🚀 Starting Docker dev environment..."
echo "📋 Task ID: $TASK_ID"
echo "🔌 Port Offset: $PORT_OFFSET"
echo ""
# Get script directory
SCRIPT_DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" &> /dev/null && pwd )"
# Check prerequisites
echo "🔍 Checking prerequisites..."
# Check Docker first, since starting the main database below requires it
if ! docker ps >/dev/null 2>&1; then
echo "❌ Docker is not running"
exit 1
fi
# Check main database
if ! PGPASSWORD=postgres psql -h localhost -p 5432 -U postgres -d managing -c '\q' 2>/dev/null; then
echo "❌ Main database not accessible at localhost:5432"
echo "💡 Starting main database..."
# Run in a subshell so the caller's working directory is preserved for the next step
( cd "$SCRIPT_DIR/../src/Managing.Docker" && docker-compose -f docker-compose.yml -f docker-compose.local.yml up -d postgres )
echo "⏳ Waiting for database to start..."
sleep 15
fi
# Start task environment
echo "🚀 Starting task environment..."
bash "$SCRIPT_DIR/start-task-docker.sh" "$TASK_ID" "$PORT_OFFSET"

scripts/start-task-docker.sh Executable file

@@ -0,0 +1,189 @@
#!/bin/bash
# scripts/start-task-docker.sh
# Starts a Docker Compose environment for a specific task with database copy
TASK_ID=$1
PORT_OFFSET=${2:-0}
if [ -z "$TASK_ID" ]; then
echo "❌ Usage: $0 <TASK_ID> [PORT_OFFSET]"
exit 1
fi
# Determine project root
# If called from main repo, use current directory
# If called from worktree wrapper, we should be in main repo already
if [ -d "$(pwd)/scripts" ] && [ -f "$(pwd)/scripts/start-api-and-workers.sh" ]; then
# We're in the main repo
PROJECT_ROOT="$(pwd)"
echo "📁 Using main repository: $PROJECT_ROOT"
else
# Try to find main repo
MAIN_REPO="/Users/oda/Desktop/Projects/managing-apps"
if [ -d "$MAIN_REPO/scripts" ]; then
PROJECT_ROOT="$MAIN_REPO"
echo "📁 Using main repository: $PROJECT_ROOT"
else
echo "❌ Error: Cannot find main repository with scripts"
exit 1
fi
fi
SCRIPT_DIR="$PROJECT_ROOT/scripts"
# Auto-detect port offset if 0 is provided (to avoid conflicts with main database)
if [ "$PORT_OFFSET" = "0" ]; then
echo "🔍 Auto-detecting available port offset (to avoid conflicts with main database)..."
# Find an available port offset (start from 1, check up to 100)
PORT_OFFSET_FOUND=0
for offset in $(seq 1 100); do
POSTGRES_TEST=$((5432 + offset))
REDIS_TEST=$((6379 + offset))
API_TEST=$((5000 + offset))
ORLEANS_SILO_TEST=$((11111 + offset))
ORLEANS_GATEWAY_TEST=$((30000 + offset))
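# Example: offset=3 → PostgreSQL 5435, Redis 6382, API 5003, Orleans silo 11114, gateway 30003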
# Check if ports are available (try multiple methods for compatibility)
POSTGRES_FREE=true
REDIS_FREE=true
API_FREE=true
ORLEANS_SILO_FREE=true
ORLEANS_GATEWAY_FREE=true
# Method 1: lsof (macOS/Linux)
if command -v lsof >/dev/null 2>&1; then
if lsof -Pi :$POSTGRES_TEST -sTCP:LISTEN -t >/dev/null 2>&1; then
POSTGRES_FREE=false
fi
if lsof -Pi :$REDIS_TEST -sTCP:LISTEN -t >/dev/null 2>&1; then
REDIS_FREE=false
fi
if lsof -Pi :$API_TEST -sTCP:LISTEN -t >/dev/null 2>&1; then
API_FREE=false
fi
if lsof -Pi :$ORLEANS_SILO_TEST -sTCP:LISTEN -t >/dev/null 2>&1; then
ORLEANS_SILO_FREE=false
fi
if lsof -Pi :$ORLEANS_GATEWAY_TEST -sTCP:LISTEN -t >/dev/null 2>&1; then
ORLEANS_GATEWAY_FREE=false
fi
# Method 2: netstat (fallback) — checks the same ports as the lsof path above
elif command -v netstat >/dev/null 2>&1; then
if netstat -an | grep -q ":$POSTGRES_TEST.*LISTEN"; then
POSTGRES_FREE=false
fi
if netstat -an | grep -q ":$REDIS_TEST.*LISTEN"; then
REDIS_FREE=false
fi
if netstat -an | grep -q ":$API_TEST.*LISTEN"; then
API_FREE=false
fi
if netstat -an | grep -q ":$ORLEANS_SILO_TEST.*LISTEN"; then
ORLEANS_SILO_FREE=false
fi
if netstat -an | grep -q ":$ORLEANS_GATEWAY_TEST.*LISTEN"; then
ORLEANS_GATEWAY_FREE=false
fi
fi
# If all ports are free, use this offset
if [ "$POSTGRES_FREE" = "true" ] && [ "$REDIS_FREE" = "true" ] && [ "$API_FREE" = "true" ] && [ "$ORLEANS_SILO_FREE" = "true" ] && [ "$ORLEANS_GATEWAY_FREE" = "true" ]; then
PORT_OFFSET=$offset
PORT_OFFSET_FOUND=1
echo "✅ Found available port offset: $PORT_OFFSET"
echo " PostgreSQL: $POSTGRES_TEST"
echo " Redis: $REDIS_TEST"
echo " API: $API_TEST"
break
fi
done
if [ "$PORT_OFFSET_FOUND" = "0" ]; then
echo "❌ Could not find available port offset (checked offsets 1-100)"
echo "💡 Try manually specifying a port offset: bash $0 $TASK_ID 10"
exit 1
fi
fi
POSTGRES_PORT=$((5432 + PORT_OFFSET))
API_PORT=$((5000 + PORT_OFFSET))
REDIS_PORT=$((6379 + PORT_OFFSET))
# Convert to lowercase (compatible with bash 3.2+)
DB_NAME="managing_$(echo "$TASK_ID" | tr '[:upper:]' '[:lower:]')"
ORLEANS_DB_NAME="orleans_$(echo "$TASK_ID" | tr '[:upper:]' '[:lower:]')"
echo "🚀 Starting Docker environment for task: $TASK_ID"
echo "📊 Port offset: $PORT_OFFSET"
echo "📊 PostgreSQL: localhost:$POSTGRES_PORT"
echo "🔌 API: http://localhost:$API_PORT"
echo "💾 Redis: localhost:$REDIS_PORT"
echo "💾 Database: $DB_NAME"
# Verify main database is accessible
echo "🔍 Verifying main database connection..."
if ! PGPASSWORD=postgres psql -h localhost -p 5432 -U postgres -d managing -c '\q' 2>/dev/null; then
echo "❌ Cannot connect to main database at localhost:5432"
echo "💡 Starting main database..."
cd "$PROJECT_ROOT/src/Managing.Docker"
# Use docker compose (newer) or docker-compose (older)
if command -v docker &> /dev/null && docker compose version &> /dev/null; then
docker compose -f docker-compose.yml -f docker-compose.local.yml up -d postgres
else
docker-compose -f docker-compose.yml -f docker-compose.local.yml up -d postgres
fi
echo "⏳ Waiting for database to start..."
sleep 15
fi
# Create compose file
echo "📝 Creating Docker Compose file..."
bash "$SCRIPT_DIR/create-task-compose.sh" "$TASK_ID" "$PORT_OFFSET"
COMPOSE_FILE="$PROJECT_ROOT/src/Managing.Docker/docker-compose.task-${TASK_ID}.yml"
# Start services (except API/Workers - we'll start them after DB copy)
echo "🐳 Starting PostgreSQL, Redis..."
cd "$PROJECT_ROOT/src/Managing.Docker"
# Use docker compose (newer) or docker-compose (older)
if command -v docker &> /dev/null && docker compose version &> /dev/null; then
docker compose -f "$COMPOSE_FILE" up -d postgres-${TASK_ID} redis-${TASK_ID}
else
docker-compose -f "$COMPOSE_FILE" up -d postgres-${TASK_ID} redis-${TASK_ID}
fi
# Wait for PostgreSQL
echo "⏳ Waiting for PostgreSQL..."
for i in {1..60}; do
if PGPASSWORD=postgres psql -h localhost -p $POSTGRES_PORT -U postgres -d postgres -c '\q' 2>/dev/null; then
echo "✅ PostgreSQL is ready"
break
fi
if [ $i -eq 60 ]; then
echo "❌ PostgreSQL not ready after 60 attempts"
if command -v docker &> /dev/null && docker compose version &> /dev/null; then
docker compose -f "$COMPOSE_FILE" down
else
docker-compose -f "$COMPOSE_FILE" down
fi
exit 1
fi
sleep 2
done
# Copy database
echo "📦 Copying database from main repo..."
bash "$SCRIPT_DIR/copy-database-for-task.sh" "$TASK_ID" "localhost" "5432" "localhost" "$POSTGRES_PORT"
if [ $? -eq 0 ]; then
# Start API and Workers using dotnet run
echo "🚀 Starting API and Workers with dotnet run..."
bash "$SCRIPT_DIR/start-api-and-workers.sh" "$TASK_ID" "$PORT_OFFSET"
echo ""
echo "✅ Environment ready!"
echo "━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━"
echo "📊 API: http://localhost:$API_PORT"
echo "💾 Database: $DB_NAME on port $POSTGRES_PORT"
echo "💾 Redis: localhost:$REDIS_PORT"
echo "🔧 To view API logs: tail -f .task-pids/api-${TASK_ID}.log"
echo "🔧 To view Workers logs: tail -f .task-pids/workers-${TASK_ID}.log"
echo "🔧 To stop: bash scripts/stop-task-docker.sh $TASK_ID"
echo "━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━"
else
echo "❌ Database copy failed"
if command -v docker &> /dev/null && docker compose version &> /dev/null; then
docker compose -f "$COMPOSE_FILE" down
else
docker-compose -f "$COMPOSE_FILE" down
fi
exit 1
fi

scripts/stop-task-docker.sh Executable file

@@ -0,0 +1,82 @@
#!/bin/bash
# scripts/stop-task-docker.sh
# Stops and cleans up a task-specific Docker Compose environment and dotnet processes
TASK_ID=$1
SCRIPT_DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" &> /dev/null && pwd )"
PROJECT_ROOT="$(dirname "$SCRIPT_DIR")"
COMPOSE_DIR="$PROJECT_ROOT/src/Managing.Docker"
COMPOSE_FILE="$COMPOSE_DIR/docker-compose.task-${TASK_ID}.yml"
PID_DIR="$PROJECT_ROOT/.task-pids"
API_PID_FILE="$PID_DIR/api-${TASK_ID}.pid"
WORKERS_PID_FILE="$PID_DIR/workers-${TASK_ID}.pid"
if [ -z "$TASK_ID" ]; then
echo "❌ Usage: $0 <TASK_ID>"
exit 1
fi
echo "🛑 Stopping environment for task: $TASK_ID"
# Stop dotnet processes (API and Workers)
if [ -f "$API_PID_FILE" ]; then
API_PID=$(cat "$API_PID_FILE")
if ps -p "$API_PID" > /dev/null 2>&1; then
echo "🛑 Stopping API (PID: $API_PID)..."
kill "$API_PID" 2>/dev/null || true
sleep 2
# Force kill if still running
if ps -p "$API_PID" > /dev/null 2>&1; then
kill -9 "$API_PID" 2>/dev/null || true
fi
echo "✅ API stopped"
fi
rm -f "$API_PID_FILE"
fi
if [ -f "$WORKERS_PID_FILE" ]; then
WORKERS_PID=$(cat "$WORKERS_PID_FILE")
if ps -p "$WORKERS_PID" > /dev/null 2>&1; then
echo "🛑 Stopping Workers (PID: $WORKERS_PID)..."
kill "$WORKERS_PID" 2>/dev/null || true
sleep 2
# Force kill if still running
if ps -p "$WORKERS_PID" > /dev/null 2>&1; then
kill -9 "$WORKERS_PID" 2>/dev/null || true
fi
echo "✅ Workers stopped"
fi
rm -f "$WORKERS_PID_FILE"
fi
# Clean up log files
rm -f "$PID_DIR/api-${TASK_ID}.log" "$PID_DIR/workers-${TASK_ID}.log" 2>/dev/null || true
# Stop Docker services (PostgreSQL and Redis)
cd "$COMPOSE_DIR"
if [ -f "$COMPOSE_FILE" ]; then
echo "🛑 Stopping Docker services..."
if command -v docker &> /dev/null && docker compose version &> /dev/null; then
docker compose -f "$COMPOSE_FILE" down -v
else
docker-compose -f "$COMPOSE_FILE" down -v
fi
rm -f "$COMPOSE_FILE"
echo "✅ Docker services stopped"
else
echo "⚠️ Compose file not found: $COMPOSE_FILE"
echo "💡 Trying to stop containers manually..."
# Try to stop containers by name pattern
docker stop postgres-${TASK_ID} redis-${TASK_ID} 2>/dev/null || true
docker rm postgres-${TASK_ID} redis-${TASK_ID} 2>/dev/null || true
# Remove volumes (Docker Compose prefixes volume names with the project name "task-<task id lower>")
TASK_ID_LOWER="$(echo "$TASK_ID" | tr '[:upper:]' '[:lower:]')"
docker volume rm "task-${TASK_ID_LOWER}_postgresdata_${TASK_ID}" "task-${TASK_ID_LOWER}_redis_data_${TASK_ID}" 2>/dev/null || true
echo "✅ Docker cleanup attempted"
fi
echo "✅ Environment stopped and cleaned up"

scripts/vibe-dev-server.sh Executable file

@@ -0,0 +1,58 @@
#!/bin/bash
# scripts/vibe-dev-server.sh
# Minimal script for Vibe Kanban worktrees
# This script runs from the worktree and uses main repo scripts for Docker setup
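# Example invocation from a worktree (task id and port offset are illustrative):
#   bash scripts/vibe-dev-server.sh TASK-42 10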
TASK_ID=${1:-"DEV-$(date +%Y%m%d-%H%M%S)"}
PORT_OFFSET=${2:-0}
# Detect worktree root
WORKTREE_ROOT="$(pwd)"
# Check if we're in a nested structure (Vibe Kanban worktree)
if [ -d "$WORKTREE_ROOT/managing-apps" ] && [ -d "$WORKTREE_ROOT/managing-apps/src/Managing.Api" ]; then
WORKTREE_PROJECT_ROOT="$WORKTREE_ROOT/managing-apps"
elif [ -d "$WORKTREE_ROOT/src/Managing.Api" ]; then
WORKTREE_PROJECT_ROOT="$WORKTREE_ROOT"
else
echo "❌ Cannot find project structure in worktree"
echo " Current directory: $WORKTREE_ROOT"
exit 1
fi
echo "📁 Worktree project root: $WORKTREE_PROJECT_ROOT"
# Find main repository (try common locations)
MAIN_REPO_PATHS=(
"/Users/oda/Desktop/Projects/managing-apps"
"$(git -C "$WORKTREE_PROJECT_ROOT" rev-parse --show-toplevel 2>/dev/null || echo '')"
"$(dirname "$WORKTREE_ROOT" 2>/dev/null)/managing-apps"
)
MAIN_REPO=""
for path in "${MAIN_REPO_PATHS[@]}"; do
if [ -n "$path" ] && [ -d "$path" ] && [ -d "$path/scripts" ] && [ -f "$path/scripts/start-task-docker.sh" ]; then
MAIN_REPO="$path"
break
fi
done
if [ -z "$MAIN_REPO" ]; then
echo "❌ Cannot find main repository with scripts"
echo "💡 Tried:"
for path in "${MAIN_REPO_PATHS[@]}"; do
echo " - $path"
done
exit 1
fi
echo "📁 Main repository: $MAIN_REPO"
echo "🚀 Starting dev environment..."
echo " Task ID: $TASK_ID"
echo " Port offset: $PORT_OFFSET"
# Export worktree path so main repo scripts know where to run dotnet from
export VIBE_WORKTREE_ROOT="$WORKTREE_PROJECT_ROOT"
# Call main repo's start script
bash "$MAIN_REPO/scripts/start-task-docker.sh" "$TASK_ID" "$PORT_OFFSET"