Initial commit
Some checks failed
CI/CD Pipeline / Code Quality & Linting (push) Has been cancelled
CI/CD Pipeline / Policy Validation (push) Has been cancelled
CI/CD Pipeline / Test Suite (push) Has been cancelled
CI/CD Pipeline / Build Docker Images (svc-coverage) (push) Has been cancelled
CI/CD Pipeline / Build Docker Images (svc-extract) (push) Has been cancelled
CI/CD Pipeline / Build Docker Images (svc-firm-connectors) (push) Has been cancelled
CI/CD Pipeline / Build Docker Images (svc-forms) (push) Has been cancelled
CI/CD Pipeline / Build Docker Images (svc-hmrc) (push) Has been cancelled
CI/CD Pipeline / Build Docker Images (svc-ingestion) (push) Has been cancelled
CI/CD Pipeline / Build Docker Images (svc-kg) (push) Has been cancelled
CI/CD Pipeline / Build Docker Images (svc-normalize-map) (push) Has been cancelled
CI/CD Pipeline / Build Docker Images (svc-ocr) (push) Has been cancelled
CI/CD Pipeline / Build Docker Images (svc-rag-indexer) (push) Has been cancelled
CI/CD Pipeline / Build Docker Images (svc-rag-retriever) (push) Has been cancelled
CI/CD Pipeline / Build Docker Images (svc-reason) (push) Has been cancelled
CI/CD Pipeline / Build Docker Images (svc-rpa) (push) Has been cancelled
CI/CD Pipeline / Build Docker Images (ui-review) (push) Has been cancelled
CI/CD Pipeline / Security Scanning (svc-coverage) (push) Has been cancelled
CI/CD Pipeline / Security Scanning (svc-extract) (push) Has been cancelled
CI/CD Pipeline / Security Scanning (svc-kg) (push) Has been cancelled
CI/CD Pipeline / Security Scanning (svc-rag-retriever) (push) Has been cancelled
CI/CD Pipeline / Security Scanning (ui-review) (push) Has been cancelled
CI/CD Pipeline / Generate SBOM (push) Has been cancelled
CI/CD Pipeline / Deploy to Staging (push) Has been cancelled
CI/CD Pipeline / Deploy to Production (push) Has been cancelled
CI/CD Pipeline / Notifications (push) Has been cancelled
Some checks failed
CI/CD Pipeline / Code Quality & Linting (push) Has been cancelled
CI/CD Pipeline / Policy Validation (push) Has been cancelled
CI/CD Pipeline / Test Suite (push) Has been cancelled
CI/CD Pipeline / Build Docker Images (svc-coverage) (push) Has been cancelled
CI/CD Pipeline / Build Docker Images (svc-extract) (push) Has been cancelled
CI/CD Pipeline / Build Docker Images (svc-firm-connectors) (push) Has been cancelled
CI/CD Pipeline / Build Docker Images (svc-forms) (push) Has been cancelled
CI/CD Pipeline / Build Docker Images (svc-hmrc) (push) Has been cancelled
CI/CD Pipeline / Build Docker Images (svc-ingestion) (push) Has been cancelled
CI/CD Pipeline / Build Docker Images (svc-kg) (push) Has been cancelled
CI/CD Pipeline / Build Docker Images (svc-normalize-map) (push) Has been cancelled
CI/CD Pipeline / Build Docker Images (svc-ocr) (push) Has been cancelled
CI/CD Pipeline / Build Docker Images (svc-rag-indexer) (push) Has been cancelled
CI/CD Pipeline / Build Docker Images (svc-rag-retriever) (push) Has been cancelled
CI/CD Pipeline / Build Docker Images (svc-reason) (push) Has been cancelled
CI/CD Pipeline / Build Docker Images (svc-rpa) (push) Has been cancelled
CI/CD Pipeline / Build Docker Images (ui-review) (push) Has been cancelled
CI/CD Pipeline / Security Scanning (svc-coverage) (push) Has been cancelled
CI/CD Pipeline / Security Scanning (svc-extract) (push) Has been cancelled
CI/CD Pipeline / Security Scanning (svc-kg) (push) Has been cancelled
CI/CD Pipeline / Security Scanning (svc-rag-retriever) (push) Has been cancelled
CI/CD Pipeline / Security Scanning (ui-review) (push) Has been cancelled
CI/CD Pipeline / Generate SBOM (push) Has been cancelled
CI/CD Pipeline / Deploy to Staging (push) Has been cancelled
CI/CD Pipeline / Deploy to Production (push) Has been cancelled
CI/CD Pipeline / Notifications (push) Has been cancelled
This commit is contained in:
200
scripts/authentik-blueprint-import.sh
Executable file
200
scripts/authentik-blueprint-import.sh
Executable file
@@ -0,0 +1,200 @@
|
||||
#!/bin/bash
# Test Authentik blueprint import after manual setup
#
# Expects Authentik to be reachable at https://auth.${DOMAIN}; the helper
# functions below pin that hostname to 127.0.0.1 via curl --resolve.

set -euo pipefail

# Colors for output
RED='\033[0;31m'
GREEN='\033[0;32m'
YELLOW='\033[1;33m'
BLUE='\033[0;34m'
NC='\033[0m' # No Color

# Configuration
# DOMAIN may be overridden from the environment; defaults to the dev TLD.
DOMAIN=${DOMAIN:-local}
AUTHENTIK_URL="https://auth.${DOMAIN}"
AUTHENTIK_API_URL="$AUTHENTIK_URL/api/v3"
ADMIN_EMAIL="admin@local.local"
# NOTE(review): falls back to a well-known default password — dev-only;
# ensure AUTHENTIK_ADMIN_PASSWORD is set anywhere non-local.
ADMIN_PASSWORD="${AUTHENTIK_ADMIN_PASSWORD:-admin123}"

echo -e "${BLUE}🧪 Testing Authentik blueprint import...${NC}"
echo
|
||||
|
||||
# Function to check if setup is complete
|
||||
# Return 0 when Authentik's initial-setup flow answers HTTP 404 (the flow is
# removed once first-run setup has been completed); return 1 otherwise.
check_setup_complete() {
    local api_host status
    api_host=$(echo "$AUTHENTIK_URL" | sed -E 's#^https?://([^/]+).*$#\1#')
    # Pin the hostname to loopback so TLS SNI/Host headers match without DNS.
    status=$(curl -ks --resolve "${api_host}:443:127.0.0.1" \
        -o /dev/null -w '%{http_code}' \
        "$AUTHENTIK_URL/if/flow/initial-setup/" || true)
    [[ "$status" == "404" ]]
}
|
||||
|
||||
# Function to get API token via login
|
||||
# Authenticate through the default flow and mint a test API token.
#
# IMPORTANT: callers capture this function's stdout (api_token=$(...)), so
# the token is the ONLY thing echoed to stdout; every progress message is
# sent to stderr.  (Previously the status lines went to stdout too, which
# corrupted the captured token value.)
#
# Globals:  AUTHENTIK_URL, AUTHENTIK_API_URL, ADMIN_EMAIL, ADMIN_PASSWORD,
#           color constants.  Writes /tmp/auth_cookies.txt and
#           /tmp/login_response.html as scratch files.
# Returns:  0 with the token on stdout, 1 on any failure.
get_api_token_via_login() {
    echo -e "${YELLOW}🔑 Getting API token via login...${NC}" >&2

    local host
    host=$(echo "$AUTHENTIK_URL" | sed -E 's#^https?://([^/]+).*$#\1#')
    local resolve=(--resolve "${host}:443:127.0.0.1")

    # Get login page and extract CSRF token
    local login_page
    login_page=$(curl -ks "${resolve[@]}" -c /tmp/auth_cookies.txt "$AUTHENTIK_URL/if/flow/default-authentication-flow/" || echo "")

    if [ -z "$login_page" ]; then
        echo -e "${RED}❌ Could not access login page${NC}" >&2
        return 1
    fi

    # Extract CSRF token from the page
    local csrf_token
    csrf_token=$(echo "$login_page" | grep -o 'name="csrfmiddlewaretoken"[^>]*value="[^"]*"' | sed 's/.*value="\([^"]*\)".*/\1/' | head -1 || echo "")

    if [ -z "$csrf_token" ]; then
        echo -e "${RED}❌ Could not extract CSRF token${NC}" >&2
        return 1
    fi

    echo -e "${GREEN}✅ CSRF token extracted${NC}" >&2

    # Login: form POST against the authentication flow, reusing the cookie jar.
    local login_response
    login_response=$(curl -ks "${resolve[@]}" -b /tmp/auth_cookies.txt -c /tmp/auth_cookies.txt \
        -X POST "$AUTHENTIK_URL/if/flow/default-authentication-flow/" \
        -H "Content-Type: application/x-www-form-urlencoded" \
        -H "Referer: $AUTHENTIK_URL/if/flow/default-authentication-flow/" \
        -d "csrfmiddlewaretoken=$csrf_token&uid_field=$ADMIN_EMAIL&password=$ADMIN_PASSWORD" \
        -w '%{http_code}' -o /tmp/login_response.html || echo "")

    if [[ "$login_response" =~ ^(200|302)$ ]]; then
        echo -e "${GREEN}✅ Login successful${NC}" >&2

        # Get admin interface page to obtain a post-login CSRF token
        # (Django rotates the CSRF cookie after authentication).
        local admin_page
        admin_page=$(curl -ks "${resolve[@]}" -b /tmp/auth_cookies.txt "$AUTHENTIK_URL/if/admin/" || echo "")

        local admin_csrf
        admin_csrf=$(echo "$admin_page" | grep -o 'name="csrfmiddlewaretoken"[^>]*value="[^"]*"' | sed 's/.*value="\([^"]*\)".*/\1/' | head -1 || echo "")

        if [ -n "$admin_csrf" ]; then
            # Create API token; the identifier embeds a timestamp so repeated
            # test runs do not collide.
            local token_response
            token_response=$(curl -ks "${resolve[@]}" -b /tmp/auth_cookies.txt \
                -X POST "$AUTHENTIK_API_URL/core/tokens/" \
                -H "Content-Type: application/json" \
                -H "X-CSRFToken: $admin_csrf" \
                -d "{
                    \"identifier\": \"blueprint-test-$(date +%s)\",
                    \"description\": \"Test token for blueprint import\",
                    \"expires\": \"2025-12-31T23:59:59Z\"
                }" 2>/dev/null || echo "")

            if [ -n "$token_response" ]; then
                # NOTE(review): assumes the create-token response exposes the
                # secret under 'key' — confirm against the Authentik version.
                local token
                token=$(echo "$token_response" | python3 -c "import sys, json; print(json.load(sys.stdin)['key'])" 2>/dev/null || echo "")

                if [ -n "$token" ]; then
                    echo -e "${GREEN}✅ API token created${NC}" >&2
                    # Token only — the single stdout line callers capture.
                    echo "$token"
                    return 0
                fi
            fi
        fi
    fi

    echo -e "${RED}❌ Failed to get API token${NC}" >&2
    return 1
}
|
||||
|
||||
# Function to import blueprint
|
||||
# Create a blueprint instance via the Authentik API and trigger it.
#
# Arguments: $1 - API token, sent as a Bearer token.
# Outputs:   raw / pretty-printed API responses on stdout — do not call this
#            inside a command substitution.
# Returns:   0 when the blueprint instance was created (the apply call's
#            outcome is printed but NOT checked), 1 when creation failed.
import_blueprint() {
local token="$1"

echo -e "${YELLOW}📋 Importing blueprint...${NC}"

# Pin the auth hostname to loopback so curl reaches the local proxy.
local host
host=$(echo "$AUTHENTIK_URL" | sed -E 's#^https?://([^/]+).*$#\1#')
local resolve=(--resolve "${host}:443:127.0.0.1")

# Create blueprint instance
# NOTE(review): assumes /blueprints/bootstrap.yaml already exists inside the
# Authentik container — confirm the blueprint file is mounted at that path.
local blueprint_response
blueprint_response=$(curl -ks "${resolve[@]}" \
-X POST "$AUTHENTIK_API_URL/managed/blueprints/" \
-H "Content-Type: application/json" \
-H "Authorization: Bearer $token" \
-d '{
"name": "AI Tax Agent Bootstrap",
"path": "/blueprints/bootstrap.yaml",
"context": {},
"enabled": true
}' 2>/dev/null || echo "")

echo -e "${BLUE}Blueprint creation response:${NC}"
# Pretty-print when the response is valid JSON; otherwise dump it raw.
echo "$blueprint_response" | python3 -c "import sys, json; print(json.dumps(json.load(sys.stdin), indent=2))" 2>/dev/null || echo "$blueprint_response"

# A 'pk' field in the response means the instance was created.
local blueprint_pk
blueprint_pk=$(echo "$blueprint_response" | python3 -c "import sys, json; print(json.load(sys.stdin).get('pk', ''))" 2>/dev/null || echo "")

if [ -n "$blueprint_pk" ]; then
echo -e "${GREEN}✅ Blueprint created with ID: $blueprint_pk${NC}"

# Apply the blueprint (empty JSON body; result is informational only).
echo -e "${YELLOW}🔄 Applying blueprint...${NC}"
local apply_response
apply_response=$(curl -ks "${resolve[@]}" \
-X POST "$AUTHENTIK_API_URL/managed/blueprints/$blueprint_pk/apply/" \
-H "Content-Type: application/json" \
-H "Authorization: Bearer $token" \
-d '{}' 2>/dev/null || echo "")

echo -e "${BLUE}Blueprint apply response:${NC}"
echo "$apply_response" | python3 -c "import sys, json; print(json.dumps(json.load(sys.stdin), indent=2))" 2>/dev/null || echo "$apply_response"

return 0
else
echo -e "${RED}❌ Failed to create blueprint${NC}"
return 1
fi
}
|
||||
|
||||
# Main function
|
||||
# Orchestrate the test: verify first-run setup is done, obtain an API token,
# then import and apply the blueprint.
#
# Fixes vs. original: (1) scratch files in /tmp are now removed on EVERY
# exit path via a RETURN trap — the original skipped cleanup on the early
# `return 1` branches; (2) the setup hints use the configured
# $AUTHENTIK_URL / $ADMIN_EMAIL / $ADMIN_PASSWORD instead of hard-coded
# auth.local values, so they stay correct when DOMAIN is overridden.
main() {
    # Fires whenever main returns, success or failure.
    trap 'rm -f /tmp/auth_cookies.txt /tmp/login_response.html' RETURN

    # Check if setup is complete
    if ! check_setup_complete; then
        echo -e "${YELLOW}⚠️  Initial setup is still required${NC}"
        echo -e "${BLUE}📋 Please complete setup at: ${AUTHENTIK_URL}/if/flow/initial-setup/${NC}"
        echo -e "${BLUE}Use credentials: ${ADMIN_EMAIL} / ${ADMIN_PASSWORD}${NC}"
        return 1
    fi

    echo -e "${GREEN}✅ Initial setup is complete${NC}"

    # Get API token (function prints status to stderr, token to stdout)
    local api_token
    if api_token=$(get_api_token_via_login); then
        echo -e "${GREEN}🔑 API token obtained${NC}"

        # Import blueprint
        if import_blueprint "$api_token"; then
            echo -e "${GREEN}🎉 Blueprint import test completed!${NC}"
        else
            echo -e "${RED}❌ Blueprint import failed${NC}"
            return 1
        fi
    else
        echo -e "${RED}❌ Could not get API token${NC}"
        return 1
    fi
}
|
||||
|
||||
# Run main function
|
||||
main "$@"
|
||||
155
scripts/authentik-setup.sh
Executable file
155
scripts/authentik-setup.sh
Executable file
@@ -0,0 +1,155 @@
|
||||
#!/bin/bash
|
||||
# Complete Authentik initial setup and get API token
|
||||
|
||||
set -euo pipefail
|
||||
|
||||
# Colors for output
|
||||
RED='\033[0;31m'
|
||||
GREEN='\033[0;32m'
|
||||
YELLOW='\033[1;33m'
|
||||
BLUE='\033[0;34m'
|
||||
NC='\033[0m' # No Color
|
||||
|
||||
# Configuration
|
||||
DOMAIN=${DOMAIN:-local}
|
||||
AUTHENTIK_URL="https://auth.${DOMAIN}"
|
||||
ADMIN_EMAIL="admin@local"
|
||||
ADMIN_PASSWORD="${AUTHENTIK_ADMIN_PASSWORD:-admin123}"
|
||||
ENV_FILE="infra/compose/.env"
|
||||
|
||||
echo -e "${BLUE}🔧 Completing Authentik initial setup...${NC}"
|
||||
echo
|
||||
|
||||
# Function to update env file
|
||||
# Set or update a KEY=VALUE line in $ENV_FILE.
#
# Globals:   ENV_FILE - path to the env file (read/written)
# Arguments: $1 - variable name; $2 - value
# Outputs:   a status line on stdout
#
# Uses awk reading the value from ENVIRON instead of sed so that values
# containing sed metacharacters ('&', '\', or the s|..|..| delimiter '|')
# are written literally — the previous sed-based replacement corrupted such
# values.  This also removes the macOS/Linux `sed -i` incompatibility branch.
update_env_var() {
    local var_name="$1"
    local var_value="$2"

    if grep -q "^${var_name}=" "$ENV_FILE"; then
        # Update existing variable via a temp file; ENVIRON avoids the
        # backslash mangling that awk -v would apply to the value.
        local tmp
        tmp=$(mktemp)
        name="$var_name" value="$var_value" awk '
            BEGIN { n = ENVIRON["name"]; v = ENVIRON["value"] }
            index($0, n "=") == 1 { print n "=" v; next }
            { print }
        ' "$ENV_FILE" > "$tmp" && mv "$tmp" "$ENV_FILE"
        echo -e "${GREEN}✅ Updated ${var_name}${NC}"
    else
        # Add new variable
        echo "${var_name}=${var_value}" >> "$ENV_FILE"
        echo -e "${GREEN}✅ Added ${var_name}${NC}"
    fi
}
|
||||
|
||||
# Function to check if setup is complete
|
||||
# Probe whether Authentik's first-run wizard has been completed.
# Success (0) when /if/flow/initial-setup/ returns 404 — Authentik removes
# that flow once setup is done; failure (1) in every other case.
check_setup_status() {
    local endpoint="$AUTHENTIK_URL/if/flow/initial-setup/"
    local target code
    target=$(echo "$AUTHENTIK_URL" | sed -E 's#^https?://([^/]+).*$#\1#')
    # --resolve forces the name onto loopback; || true keeps curl failures
    # from aborting the caller under `set -e`.
    code=$(curl -ks --resolve "${target}:443:127.0.0.1" \
        -o /dev/null -w '%{http_code}' "$endpoint" || true)
    if [[ "$code" == "404" ]]; then
        return 0
    fi
    return 1
}
|
||||
|
||||
# Function to get API token
|
||||
# Log in and create the bootstrap API token.
#
# IMPORTANT: callers capture stdout (api_token=$(get_api_token)), so the
# token is the ONLY thing written to stdout; all status output now goes to
# stderr — the original printed status to stdout, which corrupted the
# captured token value.
#
# Globals:  AUTHENTIK_URL, ADMIN_EMAIL, ADMIN_PASSWORD, color constants.
#           Writes /tmp/authentik_cookies.txt and /tmp/login_response.html.
# Returns:  0 with the token on stdout, 1 on any failure.
get_api_token() {
    echo -e "${YELLOW}🔑 Getting API token...${NC}" >&2

    local host
    host=$(echo "$AUTHENTIK_URL" | sed -E 's#^https?://([^/]+).*$#\1#')
    local resolve=(--resolve "${host}:443:127.0.0.1")

    # Get CSRF token first (from the login page, cookie jar started here)
    local csrf_token
    csrf_token=$(curl -ks "${resolve[@]}" -c /tmp/authentik_cookies.txt "$AUTHENTIK_URL/if/flow/default-authentication-flow/" | grep -o 'csrfmiddlewaretoken[^>]*value="[^"]*"' | sed 's/.*value="\([^"]*\)".*/\1/' || echo "")

    if [ -z "$csrf_token" ]; then
        echo -e "${RED}❌ Could not get CSRF token${NC}" >&2
        return 1
    fi

    # Login to get session
    local login_response
    login_response=$(curl -ks "${resolve[@]}" -b /tmp/authentik_cookies.txt -c /tmp/authentik_cookies.txt \
        -X POST "$AUTHENTIK_URL/if/flow/default-authentication-flow/" \
        -H "Content-Type: application/x-www-form-urlencoded" \
        -H "Referer: $AUTHENTIK_URL/if/flow/default-authentication-flow/" \
        -d "csrfmiddlewaretoken=$csrf_token&uid_field=$ADMIN_EMAIL&password=$ADMIN_PASSWORD" \
        -w '%{http_code}' -o /tmp/login_response.html || echo "")

    if [[ "$login_response" =~ ^(200|302)$ ]]; then
        echo -e "${GREEN}✅ Login successful${NC}" >&2

        # Create API token.
        # NOTE(review): this reuses the pre-login CSRF token; Django rotates
        # the CSRF cookie after login, so the post-login value may be
        # required — confirm against the deployed Authentik version.
        local token_response
        token_response=$(curl -ks "${resolve[@]}" -b /tmp/authentik_cookies.txt \
            -X POST "$AUTHENTIK_URL/api/v3/core/tokens/" \
            -H "Content-Type: application/json" \
            -H "X-CSRFToken: $csrf_token" \
            -d "{
                \"identifier\": \"ai-tax-agent-bootstrap\",
                \"description\": \"Bootstrap token for AI Tax Agent setup\",
                \"expires\": \"2025-12-31T23:59:59Z\"
            }" 2>/dev/null || echo "")

        if [ -n "$token_response" ]; then
            local token
            token=$(echo "$token_response" | python3 -c "import sys, json; print(json.load(sys.stdin)['key'])" 2>/dev/null || echo "")

            if [ -n "$token" ]; then
                echo -e "${GREEN}✅ API token created${NC}" >&2
                # Token only — the single stdout line callers capture.
                echo "$token"
                return 0
            fi
        fi
    fi

    echo -e "${RED}❌ Failed to get API token${NC}" >&2
    return 1
}
|
||||
|
||||
# Main function
|
||||
# Entry point: if first-run setup is finished, mint a bootstrap API token and
# persist it into $ENV_FILE; otherwise print manual setup instructions.
# Cleans up the temporary cookie jar / response capture at the end.
# NOTE(review): with `set -e`, an unexpected command failure would skip that
# cleanup; the printed URLs hard-code auth.local and ignore $DOMAIN — confirm.
main() {
# Check if setup is already complete
if check_setup_status; then
echo -e "${GREEN}✅ Authentik setup is already complete${NC}"

# Try to get API token (token is captured from the function's stdout)
local api_token
if api_token=$(get_api_token); then
echo -e "${GREEN}🔑 API token obtained${NC}"

# Update .env file with token
update_env_var "AUTHENTIK_BOOTSTRAP_TOKEN" "$api_token"

echo
echo -e "${GREEN}🎉 Setup complete! You can now run:${NC}"
echo -e "  ${BLUE}make setup-authentik${NC} - to import blueprint configuration"
else
# Token creation failed — fall back to manual instructions.
echo -e "${YELLOW}⚠️  Could not get API token automatically${NC}"
echo -e "${BLUE}📋 Manual steps:${NC}"
echo -e "  1. Open ${BLUE}https://auth.local${NC} and log in"
echo -e "  2. Go to Admin Interface > Tokens"
echo -e "  3. Create a new token and update AUTHENTIK_BOOTSTRAP_TOKEN in .env"
fi
else
# First-run wizard has not been completed yet — tell the operator what to do.
echo -e "${YELLOW}📋 Initial setup still required:${NC}"
echo -e "  1. Open ${BLUE}https://auth.local/if/flow/initial-setup/${NC}"
echo -e "  2. Complete the setup wizard with these credentials:"
echo -e "     • Email: ${BLUE}$ADMIN_EMAIL${NC}"
echo -e "     • Password: ${BLUE}$ADMIN_PASSWORD${NC}"
echo -e "  3. Re-run this script after setup is complete"
fi

# Cleanup
rm -f /tmp/authentik_cookies.txt /tmp/login_response.html
}
|
||||
|
||||
# Run main function
|
||||
main "$@"
|
||||
125
scripts/authentik_setup.sh
Executable file
125
scripts/authentik_setup.sh
Executable file
@@ -0,0 +1,125 @@
|
||||
#!/bin/bash
|
||||
# Automatically complete Authentik initial setup
|
||||
|
||||
set -euo pipefail
|
||||
|
||||
# Colors for output
|
||||
RED='\033[0;31m'
|
||||
GREEN='\033[0;32m'
|
||||
YELLOW='\033[1;33m'
|
||||
BLUE='\033[0;34m'
|
||||
NC='\033[0m' # No Color
|
||||
|
||||
# Configuration
|
||||
DOMAIN=${DOMAIN:-local}
|
||||
AUTHENTIK_URL="https://auth.${DOMAIN}"
|
||||
ADMIN_EMAIL="admin@local"
|
||||
ADMIN_PASSWORD="${AUTHENTIK_ADMIN_PASSWORD:-admin123}"
|
||||
|
||||
echo -e "${BLUE}🤖 Automatically completing Authentik initial setup...${NC}"
|
||||
echo
|
||||
|
||||
# Function to complete initial setup
|
||||
# Drive Authentik's first-run wizard non-interactively: fetch the setup page,
# lift the CSRF token, POST the admin credentials, then confirm the flow is
# gone (HTTP 404).
#
# Globals:  AUTHENTIK_URL, ADMIN_EMAIL, ADMIN_PASSWORD, color constants.
#           Writes /tmp/authentik_setup_cookies.txt and /tmp/setup_response.html.
# Returns:  0 only when the post-setup verification passes, 1 otherwise.
complete_initial_setup() {
# Pin the auth hostname to loopback so curl reaches the local proxy.
local host
host=$(echo "$AUTHENTIK_URL" | sed -E 's#^https?://([^/]+).*$#\1#')
local resolve=(--resolve "${host}:443:127.0.0.1")

echo -e "${YELLOW}📋 Completing initial setup form...${NC}"

# Get the initial setup page and extract CSRF token
local setup_page
setup_page=$(curl -ks "${resolve[@]}" -c /tmp/authentik_setup_cookies.txt "$AUTHENTIK_URL/if/flow/initial-setup/" || echo "")

if [ -z "$setup_page" ]; then
echo -e "${RED}❌ Could not access setup page${NC}"
return 1
fi

# Extract CSRF token (first hidden csrfmiddlewaretoken field on the page)
local csrf_token
csrf_token=$(echo "$setup_page" | grep -o 'csrfmiddlewaretoken[^>]*value="[^"]*"' | sed 's/.*value="\([^"]*\)".*/\1/' | head -1 || echo "")

if [ -z "$csrf_token" ]; then
echo -e "${RED}❌ Could not extract CSRF token${NC}"
return 1
fi

echo -e "${GREEN}✅ CSRF token extracted${NC}"

# Submit the initial setup form
# NOTE(review): assumes the wizard's field names are email / password /
# password_repeat — confirm against the deployed Authentik version.
local setup_response
setup_response=$(curl -ks "${resolve[@]}" -b /tmp/authentik_setup_cookies.txt -c /tmp/authentik_setup_cookies.txt \
-X POST "$AUTHENTIK_URL/if/flow/initial-setup/" \
-H "Content-Type: application/x-www-form-urlencoded" \
-H "Referer: $AUTHENTIK_URL/if/flow/initial-setup/" \
-d "csrfmiddlewaretoken=$csrf_token&email=$ADMIN_EMAIL&password=$ADMIN_PASSWORD&password_repeat=$ADMIN_PASSWORD" \
-w '%{http_code}' -o /tmp/setup_response.html || echo "")

if [[ "$setup_response" =~ ^(200|302)$ ]]; then
echo -e "${GREEN}✅ Initial setup completed successfully${NC}"

# Wait a moment for setup to complete
sleep 3

# Verify setup is complete by checking if setup page returns 404
# (Authentik removes the initial-setup flow once it has run).
local verify_code
verify_code=$(curl -ks "${resolve[@]}" -o /dev/null -w '%{http_code}' "$AUTHENTIK_URL/if/flow/initial-setup/" || true)

if [[ "$verify_code" == "404" ]]; then
echo -e "${GREEN}✅ Setup verification successful${NC}"
return 0
else
echo -e "${YELLOW}⚠️  Setup may not be complete (verification returned $verify_code)${NC}"
return 1
fi
else
echo -e "${RED}❌ Setup failed (HTTP $setup_response)${NC}"
return 1
fi
}
|
||||
|
||||
# Function to check if setup is needed
|
||||
# Return 0 when the initial-setup flow answers HTTP 200, i.e. first-run
# setup still appears to be required; 1 otherwise.
# TODO: this is not a valid check if setup is already complete, needs work.
# Authentik returns 200 even if setup is complete.
check_setup_needed() {
    local flow_host http_status
    flow_host=$(echo "$AUTHENTIK_URL" | sed -E 's#^https?://([^/]+).*$#\1#')
    http_status=$(curl -ks --resolve "${flow_host}:443:127.0.0.1" \
        -o /dev/null -w '%{http_code}' \
        "$AUTHENTIK_URL/if/flow/initial-setup/" || true)
    [[ "$http_status" == "200" ]]
}
|
||||
|
||||
# Main function
|
||||
# Drive the automated first-run setup: print next steps on success, manual
# instructions on failure, and remove the scratch files in every case.
main() {
    if ! check_setup_needed; then
        echo -e "${GREEN}✅ Authentik setup is already complete${NC}"
    else
        echo -e "${YELLOW}📋 Initial setup is required${NC}"

        if complete_initial_setup; then
            echo -e "${GREEN}🎉 Authentik initial setup completed automatically!${NC}"
            echo
            echo -e "${BLUE}📋 Next steps:${NC}"
            echo -e "  1. Run ${BLUE}make complete-authentik-setup${NC} to get API token"
            echo -e "  2. Run ${BLUE}make setup-authentik${NC} to import blueprint configuration"
            echo -e "  3. Or run ${BLUE}make setup-sso${NC} to do both automatically"
        else
            echo -e "${RED}❌ Automatic setup failed${NC}"
            echo -e "${YELLOW}📋 Manual setup required:${NC}"
            echo -e "  1. Open ${BLUE}https://auth.local/if/flow/initial-setup/${NC}"
            echo -e "  2. Use credentials: ${BLUE}$ADMIN_EMAIL${NC} / ${BLUE}$ADMIN_PASSWORD${NC}"
        fi
    fi

    # Remove the curl cookie jar and captured responses from /tmp.
    rm -f /tmp/authentik_setup_cookies.txt /tmp/setup_response.html
}
|
||||
|
||||
# Run main function
|
||||
main "$@"
|
||||
164
scripts/build-and-push-images.sh
Executable file
164
scripts/build-and-push-images.sh
Executable file
@@ -0,0 +1,164 @@
|
||||
#!/bin/bash
|
||||
# Build and Push Docker Images to Registry
|
||||
# Usage: ./scripts/build-and-push-images.sh [registry] [version] [owner] [skip-existing]
|
||||
# Example: ./scripts/build-and-push-images.sh gitea.harkon.co.uk v1.0.1 harkon
|
||||
# Example (skip existing): ./scripts/build-and-push-images.sh gitea.harkon.co.uk v1.0.1 harkon skip
|
||||
|
||||
# Don't exit on error - we want to continue building other services
|
||||
set +e
|
||||
|
||||
# Configuration
|
||||
REGISTRY="${1:-gitea.harkon.co.uk}"
|
||||
VERSION="${2:-latest}"
|
||||
OWNER="${3:-harkon}" # Gitea organization/team name
|
||||
SKIP_EXISTING="${4:-}" # Set to "skip" to skip already built images
|
||||
|
||||
# Note: Gitea container registry requires format: {registry}/{owner}/{image}:{tag}
|
||||
# The owner must be your Gitea username or organization name
|
||||
|
||||
# Colors
|
||||
GREEN='\033[0;32m'
|
||||
BLUE='\033[0;34m'
|
||||
YELLOW='\033[1;33m'
|
||||
NC='\033[0m'
|
||||
|
||||
# Emit an informational line (blue, ℹ️ prefix) to stdout.
log_info() {
    printf '%b\n' "${BLUE}ℹ️  $1${NC}"
}
|
||||
|
||||
# Emit a success line (green, ✅ prefix) to stdout.
log_success() {
    printf '%b\n' "${GREEN}✅ $1${NC}"
}
|
||||
|
||||
# Emit a warning line (yellow, ⚠️ prefix) to stdout.
log_warning() {
    printf '%b\n' "${YELLOW}⚠️  $1${NC}"
}
|
||||
|
||||
# List of services to build
|
||||
SERVICES=(
|
||||
"svc-ingestion"
|
||||
"svc-extract"
|
||||
"svc-kg"
|
||||
"svc-rag-retriever"
|
||||
"svc-rag-indexer"
|
||||
"svc-forms"
|
||||
"svc-hmrc"
|
||||
"svc-ocr"
|
||||
"svc-rpa"
|
||||
"svc-normalize-map"
|
||||
"svc-reason"
|
||||
"svc-firm-connectors"
|
||||
"svc-coverage"
|
||||
"ui-review"
|
||||
)
|
||||
|
||||
# Check if Docker is running
|
||||
# Fail fast when the Docker daemon is not reachable.
if ! docker info > /dev/null 2>&1; then
    log_warning "Docker is not running. Please start Docker and try again."
    exit 1
fi

# Authenticate against the target registry before any pushes.
# $REGISTRY is quoted (SC2086) so an unusual value cannot word-split.
log_info "Logging in to registry: $REGISTRY"
docker login "$REGISTRY"
|
||||
|
||||
# Build and push each service
|
||||
# Build and push every service image.  Failures are logged and the loop moves
# on — the script runs with `set +e` so one bad service does not stop the rest.
# Fixes vs. original: all expansions quoted (SC2086); image-existence check
# uses grep -Fx (the old regex ^...$ let the dots in the image name match any
# character); echo|tr replaced with parameter expansion.
for service in "${SERVICES[@]}"; do
    log_info "Building $service..."

    # Determine Dockerfile path: ui-review lives under apps/ui_review,
    # services map svc-foo-bar -> apps/svc_foo_bar.
    if [ "$service" = "ui-review" ]; then
        DOCKERFILE="apps/ui_review/Dockerfile"
    else
        DIR_NAME=${service//-/_}
        DOCKERFILE="apps/$DIR_NAME/Dockerfile"
    fi

    # Check if Dockerfile exists
    if [ ! -f "$DOCKERFILE" ]; then
        log_warning "Dockerfile not found: $DOCKERFILE - Skipping $service"
        continue
    fi

    IMAGE_NAME="$REGISTRY/$OWNER/$service:$VERSION"

    # Optional fast path: reuse a locally built image instead of rebuilding.
    if [ "$SKIP_EXISTING" = "skip" ]; then
        if docker images --format "{{.Repository}}:{{.Tag}}" | grep -qFx -- "$IMAGE_NAME"; then
            log_info "Image already exists: $IMAGE_NAME - Skipping build"

            # Still try to push it
            log_info "Pushing existing image: $IMAGE_NAME"
            if docker push "$IMAGE_NAME" 2>/dev/null; then
                log_success "Pushed: $IMAGE_NAME"
            else
                log_warning "Failed to push: $IMAGE_NAME (may already exist in registry)"
            fi

            # Push the matching :latest tag too, if it exists locally.
            if [ "$VERSION" != "latest" ]; then
                LATEST_IMAGE="$REGISTRY/$OWNER/$service:latest"
                if docker images --format "{{.Repository}}:{{.Tag}}" | grep -qFx -- "$LATEST_IMAGE"; then
                    docker push "$LATEST_IMAGE" 2>/dev/null
                fi
            fi

            echo ""
            continue
        fi
    fi

    log_info "Building: $IMAGE_NAME"

    if docker build \
        -t "$IMAGE_NAME" \
        -f "$DOCKERFILE" \
        --build-arg VERSION="$VERSION" \
        --build-arg BUILD_DATE="$(date -u +'%Y-%m-%dT%H:%M:%SZ')" \
        . ; then

        log_success "Built: $IMAGE_NAME"

        # Push image
        log_info "Pushing: $IMAGE_NAME"
        if docker push "$IMAGE_NAME"; then
            log_success "Pushed: $IMAGE_NAME"
        else
            log_warning "Failed to push: $IMAGE_NAME"
        fi

        # Keep :latest pointing at the newest build as well.
        if [ "$VERSION" != "latest" ]; then
            LATEST_IMAGE="$REGISTRY/$OWNER/$service:latest"
            docker tag "$IMAGE_NAME" "$LATEST_IMAGE"
            if docker push "$LATEST_IMAGE"; then
                log_success "Also pushed as: $LATEST_IMAGE"
            else
                log_warning "Failed to push: $LATEST_IMAGE"
            fi
        fi
    else
        log_warning "Failed to build: $IMAGE_NAME - Continuing with next service"
    fi

    echo ""
done
|
||||
|
||||
# Final report.
# NOTE(review): this success banner prints unconditionally — the script runs
# with `set +e`, so individual build/push failures above do NOT stop
# execution and are not reflected here; the per-service log lines are the
# source of truth.
log_success "🎉 All images built and pushed successfully!"
log_info "Images pushed to: $REGISTRY/$OWNER"
log_info "Version: $VERSION"

# Show summary
echo ""
echo "Summary of pushed images:"
echo "━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━"
for service in "${SERVICES[@]}"; do
echo "  $REGISTRY/$OWNER/$service:$VERSION"
done
echo "━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━"
echo ""
log_info "Next steps:"
echo "  1. Deploy to production: ./scripts/deploy-to-production.sh"
echo "  2. Or deploy specific step: ./scripts/deploy-to-production.sh services"
|
||||
132
scripts/build-base-images.sh
Executable file
132
scripts/build-base-images.sh
Executable file
@@ -0,0 +1,132 @@
|
||||
#!/bin/bash
|
||||
# Build and Push Base Docker Images
|
||||
# Usage: ./scripts/build-base-images.sh [registry] [version] [owner]
|
||||
# Example: ./scripts/build-base-images.sh gitea.harkon.co.uk v1.0.1 harkon
|
||||
|
||||
set +e
|
||||
|
||||
# Configuration
|
||||
REGISTRY="${1:-gitea.harkon.co.uk}"
|
||||
VERSION="${2:-v1.0.1}"
|
||||
OWNER="${3:-harkon}"
|
||||
|
||||
# Colors
|
||||
GREEN='\033[0;32m'
|
||||
BLUE='\033[0;34m'
|
||||
YELLOW='\033[1;33m'
|
||||
RED='\033[0;31m'
|
||||
NC='\033[0m'
|
||||
|
||||
log_info() {
|
||||
echo -e "${BLUE}ℹ️ $1${NC}"
|
||||
}
|
||||
|
||||
log_success() {
|
||||
echo -e "${GREEN}✅ $1${NC}"
|
||||
}
|
||||
|
||||
log_warning() {
|
||||
echo -e "${YELLOW}⚠️ $1${NC}"
|
||||
}
|
||||
|
||||
# Emit an error line (red, ❌ prefix) to stdout.
log_error() {
    printf '%b\n' "${RED}❌ $1${NC}"
}
|
||||
|
||||
# Check if Docker is running
|
||||
# Fail fast when the Docker daemon is not reachable.
if ! docker info > /dev/null 2>&1; then
    log_error "Docker is not running. Please start Docker and try again."
    exit 1
fi

# Authenticate against the target registry before any pushes.
# $REGISTRY is quoted (SC2086) so an unusual value cannot word-split.
log_info "Logging in to registry: $REGISTRY"
docker login "$REGISTRY"
|
||||
|
||||
echo ""
|
||||
log_info "Building base images for AI Tax Agent"
|
||||
log_info "Registry: $REGISTRY"
|
||||
log_info "Owner: $OWNER"
|
||||
log_info "Version: $VERSION"
|
||||
echo ""
|
||||
|
||||
# Build base-runtime image
|
||||
log_info "Building base-runtime image (core dependencies for all services)..."
|
||||
BASE_RUNTIME_IMAGE="$REGISTRY/$OWNER/base-runtime:$VERSION"
|
||||
|
||||
if docker build \
|
||||
-t $BASE_RUNTIME_IMAGE \
|
||||
-f infra/docker/base-runtime.Dockerfile \
|
||||
--build-arg VERSION=$VERSION \
|
||||
. ; then
|
||||
|
||||
log_success "Built: $BASE_RUNTIME_IMAGE"
|
||||
|
||||
# Push image
|
||||
log_info "Pushing: $BASE_RUNTIME_IMAGE"
|
||||
if docker push $BASE_RUNTIME_IMAGE; then
|
||||
log_success "Pushed: $BASE_RUNTIME_IMAGE"
|
||||
else
|
||||
log_error "Failed to push: $BASE_RUNTIME_IMAGE"
|
||||
fi
|
||||
|
||||
# Tag as latest
|
||||
LATEST_IMAGE="$REGISTRY/$OWNER/base-runtime:latest"
|
||||
docker tag $BASE_RUNTIME_IMAGE $LATEST_IMAGE
|
||||
if docker push $LATEST_IMAGE; then
|
||||
log_success "Also pushed as: $LATEST_IMAGE"
|
||||
fi
|
||||
else
|
||||
log_error "Failed to build: $BASE_RUNTIME_IMAGE"
|
||||
fi
|
||||
|
||||
echo ""
|
||||
|
||||
# Build base-ml image
|
||||
log_info "Building base-ml image (ML dependencies - this will take 5-10 minutes)..."
|
||||
BASE_ML_IMAGE="$REGISTRY/$OWNER/base-ml:$VERSION"
|
||||
|
||||
if docker build \
|
||||
-t $BASE_ML_IMAGE \
|
||||
-f infra/docker/base-ml.Dockerfile \
|
||||
--build-arg VERSION=$VERSION \
|
||||
. ; then
|
||||
|
||||
log_success "Built: $BASE_ML_IMAGE"
|
||||
|
||||
# Push image
|
||||
log_info "Pushing: $BASE_ML_IMAGE (this will take a few minutes)..."
|
||||
if docker push $BASE_ML_IMAGE; then
|
||||
log_success "Pushed: $BASE_ML_IMAGE"
|
||||
else
|
||||
log_error "Failed to push: $BASE_ML_IMAGE"
|
||||
fi
|
||||
|
||||
# Tag as latest
|
||||
LATEST_IMAGE="$REGISTRY/$OWNER/base-ml:latest"
|
||||
docker tag $BASE_ML_IMAGE $LATEST_IMAGE
|
||||
if docker push $LATEST_IMAGE; then
|
||||
log_success "Also pushed as: $LATEST_IMAGE"
|
||||
fi
|
||||
else
|
||||
log_error "Failed to build: $BASE_ML_IMAGE"
|
||||
fi
|
||||
|
||||
echo ""
|
||||
log_success "🎉 Base images built and pushed successfully!"
|
||||
echo ""
|
||||
echo "Summary:"
|
||||
echo "━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━"
|
||||
echo " $REGISTRY/$OWNER/base-runtime:$VERSION (~300MB)"
|
||||
echo " $REGISTRY/$OWNER/base-ml:$VERSION (~1.2GB)"
|
||||
echo "━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━"
|
||||
echo ""
|
||||
log_info "Next steps:"
|
||||
echo " 1. Update ML service Dockerfiles to use base-ml image"
|
||||
echo " 2. Update non-ML service Dockerfiles to use base-runtime image (optional)"
|
||||
echo " 3. Rebuild services with: ./scripts/build-and-push-images.sh"
|
||||
echo ""
|
||||
log_info "Check image sizes:"
|
||||
echo " docker images | grep '$REGISTRY/$OWNER/base'"
|
||||
echo ""
|
||||
|
||||
401
scripts/cleanup-infra-structure.sh
Executable file
401
scripts/cleanup-infra-structure.sh
Executable file
@@ -0,0 +1,401 @@
|
||||
#!/bin/bash

# Cleanup and align infrastructure structure
# This script consolidates configurations and removes duplication

set -e

# Colors
# ANSI escape sequences used by the log_* helpers below; NC resets.
GREEN='\033[0;32m'
YELLOW='\033[1;33m'
BLUE='\033[0;34m'
RED='\033[0;31m'
NC='\033[0m'
|
||||
|
||||
# Colored, emoji-tagged status-line helpers.
# Each prints exactly one line to stdout. Color variables (BLUE/GREEN/
# YELLOW/RED/NC) are defined at the top of this script; printf '%b' expands
# the backslash escapes they contain, just as `echo -e` did.
log_info() {
    printf '%b\n' "${BLUE}ℹ️  $1${NC}"
}

log_success() {
    printf '%b\n' "${GREEN}✅ $1${NC}"
}

log_warning() {
    printf '%b\n' "${YELLOW}⚠️  $1${NC}"
}

log_error() {
    printf '%b\n' "${RED}❌ $1${NC}"
}
|
||||
|
||||
# Script directory
# Resolve absolute paths relative to this script so the tool works from any CWD.
SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
PROJECT_ROOT="$(dirname "$SCRIPT_DIR")"
INFRA_DIR="$PROJECT_ROOT/infra"

log_info "Cleaning up infrastructure structure..."
echo " Project Root: $PROJECT_ROOT"
echo " Infra Dir: $INFRA_DIR"
echo ""
|
||||
|
||||
# Step 1: Backup current structure
log_info "Step 1: Creating backup..."
# Timestamped backup directory so repeated runs never clobber an earlier backup.
BACKUP_DIR="$PROJECT_ROOT/infra-backup-$(date +%Y%m%d_%H%M%S)"
mkdir -p "$BACKUP_DIR"
# Best-effort copy: configs/ may not exist yet, so ignore cp failures.
cp -r "$INFRA_DIR/configs" "$BACKUP_DIR/" 2>/dev/null || true
log_success "Backup created at $BACKUP_DIR"
|
||||
|
||||
# Step 2: Align Traefik configurations
log_info "Step 2: Aligning Traefik configurations..."

# The source of truth is infra/compose/traefik/config/
# Remove duplicates from infra/configs/traefik/config/
if [ -d "$INFRA_DIR/configs/traefik/config" ]; then
    log_warning " Removing duplicate Traefik configs from infra/configs/traefik/config/"
    rm -rf "$INFRA_DIR/configs/traefik/config"
    log_success " Removed duplicate Traefik configs"
fi
|
||||
|
||||
# Keep only app-specific Traefik middleware in configs
mkdir -p "$INFRA_DIR/configs/traefik"
# Quoted 'EOF' delimiter: the YAML below is written verbatim, with no shell
# expansion. This file is regenerated on every run, overwriting manual edits.
cat > "$INFRA_DIR/configs/traefik/app-middlewares.yml" << 'EOF'
# Application-specific Traefik middlewares
# These are loaded by the application infrastructure, not the external Traefik

http:
  middlewares:
    # Large upload middleware for Gitea registry
    gitea-large-upload:
      buffering:
        maxRequestBodyBytes: 5368709120 # 5GB
        memRequestBodyBytes: 104857600 # 100MB
        maxResponseBodyBytes: 5368709120 # 5GB
        memResponseBodyBytes: 104857600 # 100MB
        retryExpression: "IsNetworkError() && Attempts() < 3"

    # Rate limiting for public APIs
    api-ratelimit:
      rateLimit:
        average: 100
        burst: 50
        period: 1s

    # Security headers
    security-headers:
      headers:
        frameDeny: true
        sslRedirect: true
        browserXssFilter: true
        contentTypeNosniff: true
        stsIncludeSubdomains: true
        stsPreload: true
        stsSeconds: 31536000
EOF

log_success " Created app-specific Traefik middlewares"
|
||||
|
||||
# Step 3: Align Authentik configurations
log_info "Step 3: Aligning Authentik configurations..."

# infra/compose/authentik/  - Production service configs
# infra/configs/authentik/  - Application bootstrap configs (keep separate)
# Nothing is moved here; this step only confirms the expected layout.
if [ -d "$INFRA_DIR/configs/authentik" ]; then
    log_info " Keeping app-specific Authentik bootstrap in infra/configs/authentik/"
    log_success " Authentik configs aligned"
fi
|
||||
|
||||
# Step 4: Clean up old directories
log_info "Step 4: Cleaning up old directories..."

# Remove old standalone config directories that were moved
OLD_DIRS=(
    "$INFRA_DIR/traefik"
    "$INFRA_DIR/grafana"
    "$INFRA_DIR/prometheus"
    "$INFRA_DIR/loki"
    "$INFRA_DIR/promtail"
    "$INFRA_DIR/vault"
    "$INFRA_DIR/neo4j"
    "$INFRA_DIR/postgres"
)

for dir in "${OLD_DIRS[@]}"; do
    # Only delete a directory when a ".moved" marker in the corresponding
    # infra/configs/<name>/ confirms its contents were already migrated.
    # `basename` output is quoted (SC2086 fix) so paths survive any spaces.
    if [ -d "$dir" ] && [ -f "$INFRA_DIR/configs/$(basename "$dir")/.moved" ]; then
        log_warning " Removing old directory: $dir"
        rm -rf "$dir"
        log_success " Removed $dir"
    fi
done
|
||||
|
||||
# Step 5: Update .gitignore
log_info "Step 5: Updating .gitignore..."

# Regenerate infra/.gitignore wholesale (quoted 'EOF' = written verbatim).
# NOTE(review): this overwrites any manual additions to infra/.gitignore.
cat > "$INFRA_DIR/.gitignore" << 'EOF'
# Environment files (contain secrets)
environments/*/.env
!environments/*/.env.example
compose/*/.env
!compose/env.example

# Certificates
certs/*/
!certs/.gitkeep
compose/*/certs/
!compose/*/certs/.gitkeep

# Provider credentials
compose/traefik/.provider.env
configs/traefik/.provider.env

# Data directories
compose/*/data/
compose/*/media/
compose/authentik/media/
compose/authentik/custom-templates/
compose/portainer/portainer/

# Backup files
*.backup
*.tmp
*-backup-*/

# Docker volumes (if mounted locally)
volumes/

# Logs
*.log
logs/

# Moved markers
**/.moved
EOF

log_success ".gitignore updated"
|
||||
|
||||
# Step 6: Create README for external services
log_info "Step 6: Creating documentation..."

# Quoted 'EOF': the Markdown below is written verbatim (regenerated each run).
cat > "$INFRA_DIR/compose/README.md" << 'EOF'
# External Services

This directory contains Docker Compose configurations for external services that run on the production server.

## Services

### Traefik
- **Location**: `traefik/`
- **Purpose**: Reverse proxy and load balancer for all services
- **Deploy**: `cd traefik && docker compose up -d`
- **Access**: https://traefik.harkon.co.uk

### Authentik
- **Location**: `authentik/`
- **Purpose**: SSO and authentication provider
- **Deploy**: `cd authentik && docker compose up -d`
- **Access**: https://authentik.harkon.co.uk

### Gitea
- **Location**: `gitea/`
- **Purpose**: Git repository hosting and container registry
- **Deploy**: `cd gitea && docker compose up -d`
- **Access**: https://gitea.harkon.co.uk

### Nextcloud
- **Location**: `nextcloud/`
- **Purpose**: File storage and collaboration
- **Deploy**: `cd nextcloud && docker compose up -d`
- **Access**: https://nextcloud.harkon.co.uk

### Portainer
- **Location**: `portainer/`
- **Purpose**: Docker management UI
- **Deploy**: `cd portainer && docker compose up -d`
- **Access**: https://portainer.harkon.co.uk

## Deployment

### Production (Remote Server)

```bash
# SSH to server
ssh deploy@141.136.35.199

# Navigate to service directory
cd /opt/ai-tax-agent/infra/compose/<service>

# Deploy service
docker compose up -d

# Check logs
docker compose logs -f

# Check status
docker compose ps
```

### Local Development

For local development, use the all-in-one compose file:

```bash
cd infra/compose
docker compose -f docker-compose.local.yml up -d
```

## Configuration

Each service has its own `.env` file for environment-specific configuration:

- `traefik/.provider.env` - GoDaddy API credentials
- `authentik/.env` - Authentik secrets
- `gitea/.env` - Gitea database credentials

## Networks

All services use shared Docker networks:

- `frontend` - Public-facing services
- `backend` - Internal services

Create networks before deploying:

```bash
docker network create frontend
docker network create backend
```

## Maintenance

### Update Service

```bash
cd /opt/ai-tax-agent/infra/compose/<service>
docker compose pull
docker compose up -d
```

### Restart Service

```bash
cd /opt/ai-tax-agent/infra/compose/<service>
docker compose restart
```

### View Logs

```bash
cd /opt/ai-tax-agent/infra/compose/<service>
docker compose logs -f
```

### Backup Data

```bash
# Backup volumes
docker run --rm -v <service>_data:/data -v $(pwd):/backup alpine tar czf /backup/<service>-backup.tar.gz /data
```

## Integration with Application

These external services are used by the application infrastructure:

- **Traefik** - Routes traffic to application services
- **Authentik** - Provides SSO for application UIs
- **Gitea** - Hosts Docker images for application services

The application infrastructure is deployed separately using:

```bash
./infra/scripts/deploy.sh production infrastructure
./infra/scripts/deploy.sh production services
```
EOF

log_success "Created external services README"
|
||||
|
||||
# Step 7: Create deployment helper script
log_info "Step 7: Creating deployment helper script..."

# Quoted 'EOF': the helper script is written verbatim; $-expressions inside
# belong to the generated script, not this one. Regenerated on every run.
cat > "$SCRIPT_DIR/deploy-external.sh" << 'EOF'
#!/bin/bash

# Deploy external services on production server
# Usage: ./scripts/deploy-external.sh <service>

set -e

SERVICE=$1

if [ -z "$SERVICE" ]; then
    echo "Usage: $0 <service>"
    echo ""
    echo "Available services:"
    echo " traefik"
    echo " authentik"
    echo " gitea"
    echo " nextcloud"
    echo " portainer"
    echo " all"
    exit 1
fi

SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
PROJECT_ROOT="$(dirname "$SCRIPT_DIR")"
COMPOSE_DIR="$PROJECT_ROOT/infra/compose"

deploy_service() {
    local svc=$1
    echo "🚀 Deploying $svc..."

    if [ ! -d "$COMPOSE_DIR/$svc" ]; then
        echo "❌ Service directory not found: $COMPOSE_DIR/$svc"
        return 1
    fi

    cd "$COMPOSE_DIR/$svc"
    docker compose up -d
    echo "✅ $svc deployed"
}

if [ "$SERVICE" = "all" ]; then
    deploy_service "traefik"
    sleep 5
    deploy_service "authentik"
    sleep 5
    deploy_service "gitea"
    deploy_service "nextcloud"
    deploy_service "portainer"
else
    deploy_service "$SERVICE"
fi

echo ""
echo "🎉 Deployment complete!"
EOF

chmod +x "$SCRIPT_DIR/deploy-external.sh"
log_success "Created deploy-external.sh script"
|
||||
|
||||
# Step 8: Summary
# Purely informational recap of the steps performed above.
echo ""
log_success "Cleanup complete!"
echo ""
log_info "Summary of changes:"
echo " ✅ Removed duplicate Traefik configs"
echo " ✅ Created app-specific Traefik middlewares"
echo " ✅ Aligned Authentik configurations"
echo " ✅ Cleaned up old directories"
echo " ✅ Updated .gitignore"
echo " ✅ Created external services README"
echo " ✅ Created deploy-external.sh script"
echo ""
log_info "Backup location: $BACKUP_DIR"
echo ""
log_info "Next steps:"
echo " 1. Review changes in infra/ directory"
echo " 2. Update Makefile with new targets"
echo " 3. Test local deployment: make run"
echo " 4. Test external service deployment: ./scripts/deploy-external.sh traefik"
echo ""
|
||||
|
||||
155
scripts/complete-authentik-setup.sh
Executable file
155
scripts/complete-authentik-setup.sh
Executable file
@@ -0,0 +1,155 @@
|
||||
#!/bin/bash
# Complete Authentik initial setup and get API token

set -euo pipefail

# Colors for output
RED='\033[0;31m'
GREEN='\033[0;32m'
YELLOW='\033[1;33m'
BLUE='\033[0;34m'
NC='\033[0m' # No Color

# Configuration
# DOMAIN may be overridden from the environment; defaults to the local dev domain.
DOMAIN=${DOMAIN:-local}
AUTHENTIK_URL="https://auth.${DOMAIN}"
ADMIN_EMAIL="admin@local"
# NOTE(review): weak fallback password intended for local development only —
# export AUTHENTIK_ADMIN_PASSWORD for anything non-local.
ADMIN_PASSWORD="${AUTHENTIK_ADMIN_PASSWORD:-admin123}"
# Env file updated with the bootstrap token; path is relative to the repo root,
# so this script must be run from there.
ENV_FILE="infra/compose/.env"

echo -e "${BLUE}🔧 Completing Authentik initial setup...${NC}"
echo
|
||||
|
||||
# Function to update env file
# Insert or replace a "NAME=VALUE" entry in $ENV_FILE.
# Arguments: $1 - variable name, $2 - variable value
# The previous sed-based implementation corrupted values containing sed
# metacharacters ('|' broke the s||| delimiter, '&' expanded to the match,
# backslashes were interpreted) — API tokens routinely contain such characters.
# This rewrite copies the file line by line and emits the value verbatim, and
# it behaves identically on macOS and Linux (no OSTYPE branching needed).
update_env_var() {
    local var_name="$1"
    local var_value="$2"

    if grep -q "^${var_name}=" "$ENV_FILE"; then
        # Update existing variable: rewrite to a temp file, then move into place.
        local tmp_file
        tmp_file=$(mktemp)
        while IFS= read -r line || [ -n "$line" ]; do
            if [[ "$line" == "${var_name}="* ]]; then
                printf '%s=%s\n' "$var_name" "$var_value"
            else
                printf '%s\n' "$line"
            fi
        done < "$ENV_FILE" > "$tmp_file"
        mv "$tmp_file" "$ENV_FILE"
        echo -e "${GREEN}✅ Updated ${var_name}${NC}"
    else
        # Add new variable
        echo "${var_name}=${var_value}" >> "$ENV_FILE"
        echo -e "${GREEN}✅ Added ${var_name}${NC}"
    fi
}
|
||||
|
||||
# Function to check if setup is complete
# Probe Authentik's initial-setup flow: the endpoint returns 404 once the
# setup wizard has been completed, so 404 means "done".
# Returns: 0 when setup is complete, 1 when it is still required.
check_setup_status() {
    # Extract the bare hostname from the URL (scheme and path stripped).
    local probe_host
    probe_host=$(sed -E 's#^https?://([^/]+).*$#\1#' <<< "$AUTHENTIK_URL")

    # Pin the hostname to localhost so this works before DNS is configured;
    # -k skips TLS verification (local self-signed certs), -s stays quiet.
    local curl_opts=(-ks --resolve "${probe_host}:443:127.0.0.1" -o /dev/null -w '%{http_code}')

    local status_code
    status_code=$(curl "${curl_opts[@]}" "$AUTHENTIK_URL/if/flow/initial-setup/" || true)

    # The test's exit status is the function's return value.
    [[ "$status_code" == "404" ]]
}
|
||||
|
||||
# Function to get API token
# Logs into Authentik with the admin credentials via the web login flow, then
# uses the session cookie to create a bootstrap API token via the REST API.
# Outputs: ONLY the token on stdout (callers capture it with $(get_api_token));
#          all progress/diagnostic messages go to stderr. Previously they went
#          to stdout too, so the captured "token" contained the status lines.
# Returns: 0 with the token printed, non-zero on any failure.
# NOTE(review): scrapes the CSRF token out of the login page HTML — brittle
# against Authentik UI changes; re-verify after upgrading Authentik.
get_api_token() {
    echo -e "${YELLOW}🔑 Getting API token...${NC}" >&2

    local host
    host=$(echo "$AUTHENTIK_URL" | sed -E 's#^https?://([^/]+).*$#\1#')
    # Pin the hostname to localhost so this works before DNS is configured.
    local resolve=(--resolve "${host}:443:127.0.0.1")

    # Get CSRF token first
    local csrf_token
    csrf_token=$(curl -ks "${resolve[@]}" -c /tmp/authentik_cookies.txt "$AUTHENTIK_URL/if/flow/default-authentication-flow/" | grep -o 'csrfmiddlewaretoken[^>]*value="[^"]*"' | sed 's/.*value="\([^"]*\)".*/\1/' || echo "")

    if [ -z "$csrf_token" ]; then
        echo -e "${RED}❌ Could not get CSRF token${NC}" >&2
        return 1
    fi

    # Login to get session
    local login_response
    login_response=$(curl -ks "${resolve[@]}" -b /tmp/authentik_cookies.txt -c /tmp/authentik_cookies.txt \
        -X POST "$AUTHENTIK_URL/if/flow/default-authentication-flow/" \
        -H "Content-Type: application/x-www-form-urlencoded" \
        -H "Referer: $AUTHENTIK_URL/if/flow/default-authentication-flow/" \
        -d "csrfmiddlewaretoken=$csrf_token&uid_field=$ADMIN_EMAIL&password=$ADMIN_PASSWORD" \
        -w '%{http_code}' -o /tmp/login_response.html || echo "")

    if [[ "$login_response" =~ ^(200|302)$ ]]; then
        echo -e "${GREEN}✅ Login successful${NC}" >&2

        # Create API token
        local token_response
        token_response=$(curl -ks "${resolve[@]}" -b /tmp/authentik_cookies.txt \
            -X POST "$AUTHENTIK_URL/api/v3/core/tokens/" \
            -H "Content-Type: application/json" \
            -H "X-CSRFToken: $csrf_token" \
            -d "{
                \"identifier\": \"ai-tax-agent-bootstrap\",
                \"description\": \"Bootstrap token for AI Tax Agent setup\",
                \"expires\": \"2025-12-31T23:59:59Z\"
            }" 2>/dev/null || echo "")

        if [ -n "$token_response" ]; then
            local token
            # Extract the 'key' field; empty if the response is not valid JSON.
            token=$(echo "$token_response" | python3 -c "import sys, json; print(json.load(sys.stdin)['key'])" 2>/dev/null || echo "")

            if [ -n "$token" ]; then
                echo -e "${GREEN}✅ API token created${NC}" >&2
                # The token itself is the function's only stdout output.
                echo "$token"
                return 0
            fi
        fi
    fi

    echo -e "${RED}❌ Failed to get API token${NC}" >&2
    return 1
}
|
||||
|
||||
# Main function
# Orchestrates the flow: if the initial-setup wizard is already done, try to
# mint an API token and persist it into $ENV_FILE; otherwise print the manual
# wizard instructions. Temp cookie/response files are removed at the end.
# NOTE(review): `api_token=$(get_api_token)` captures everything the function
# writes to stdout — get_api_token must emit only the token there (status
# messages belong on stderr), or the stored value will be corrupted.
main() {
    # Check if setup is already complete
    if check_setup_status; then
        echo -e "${GREEN}✅ Authentik setup is already complete${NC}"

        # Try to get API token
        local api_token
        if api_token=$(get_api_token); then
            echo -e "${GREEN}🔑 API token obtained${NC}"

            # Update .env file with token
            update_env_var "AUTHENTIK_BOOTSTRAP_TOKEN" "$api_token"

            echo
            echo -e "${GREEN}🎉 Setup complete! You can now run:${NC}"
            echo -e " ${BLUE}make setup-authentik${NC} - to import blueprint configuration"
        else
            # Token creation failed — fall back to manual instructions.
            echo -e "${YELLOW}⚠️ Could not get API token automatically${NC}"
            echo -e "${BLUE}📋 Manual steps:${NC}"
            echo -e " 1. Open ${BLUE}https://auth.local${NC} and log in"
            echo -e " 2. Go to Admin Interface > Tokens"
            echo -e " 3. Create a new token and update AUTHENTIK_BOOTSTRAP_TOKEN in .env"
        fi
    else
        echo -e "${YELLOW}📋 Initial setup still required:${NC}"
        echo -e " 1. Open ${BLUE}https://auth.local/if/flow/initial-setup/${NC}"
        echo -e " 2. Complete the setup wizard with these credentials:"
        echo -e " • Email: ${BLUE}$ADMIN_EMAIL${NC}"
        echo -e " • Password: ${BLUE}$ADMIN_PASSWORD${NC}"
        echo -e " 3. Re-run this script after setup is complete"
    fi

    # Cleanup
    rm -f /tmp/authentik_cookies.txt /tmp/login_response.html
}

# Run main function
main "$@"
|
||||
27
scripts/create-networks.sh
Executable file
27
scripts/create-networks.sh
Executable file
@@ -0,0 +1,27 @@
|
||||
#!/bin/bash
# Create external Docker networks for AI Tax Agent

set -e

echo "Creating external Docker networks..."

# Return 0 iff a network with exactly this name exists.
# (The previous `docker network ls | grep -q NAME` matched substrings, so a
# network like "ai-tax-agent-frontend-test" would mask the real one.)
network_exists() {
    docker network inspect "$1" >/dev/null 2>&1
}

# Create frontend network (for Traefik and public-facing services)
if ! network_exists "ai-tax-agent-frontend"; then
    docker network create ai-tax-agent-frontend
    echo "✅ Created frontend network: ai-tax-agent-frontend"
else
    echo "ℹ️ Frontend network already exists: ai-tax-agent-frontend"
fi

# Create backend network (for internal services)
if ! network_exists "ai-tax-agent-backend"; then
    docker network create ai-tax-agent-backend
    echo "✅ Created backend network: ai-tax-agent-backend"
else
    echo "ℹ️ Backend network already exists: ai-tax-agent-backend"
fi

echo "🎉 Network setup complete!"
echo ""
echo "Networks created:"
docker network ls | grep "ai-tax-agent"
|
||||
54
scripts/debug-remote.sh
Normal file
54
scripts/debug-remote.sh
Normal file
@@ -0,0 +1,54 @@
|
||||
#!/bin/bash

# Debug script for remote server issues
# Runs a read-only diagnostic checklist over SSH: Docker health, images,
# registry login/connectivity, disk space, and any in-flight base-ml builds.
# Nothing here mutates the server.

echo "=== Connecting to remote server ==="
echo "Running diagnostics..."
echo ""

# Quoted 'ENDSSH': the commands below are sent verbatim and run remotely.
# `set -x` traces each remote command so the output is self-annotating.
ssh -t deploy@141.136.35.199 << 'ENDSSH'
set -x

echo "=== 1. Check Docker is running ==="
docker --version
docker info | head -10

echo ""
echo "=== 2. Check Docker images ==="
docker images | head -20

echo ""
echo "=== 3. Check if logged in to Gitea ==="
cat ~/.docker/config.json 2>/dev/null || echo "No Docker config found"

echo ""
echo "=== 4. Check Gitea container ==="
docker ps | grep gitea || echo "Gitea not running"

echo ""
echo "=== 5. Check recent Docker logs ==="
docker ps -a --format "{{.Names}}" | head -5

echo ""
echo "=== 6. Test Gitea registry connectivity ==="
curl -I https://gitea.harkon.co.uk/v2/ 2>&1 | head -10

echo ""
echo "=== 7. Check disk space ==="
df -h | grep -E "Filesystem|/$"

echo ""
echo "=== 8. Check if base-ml build is in progress ==="
docker ps | grep build || echo "No build in progress"

echo ""
echo "=== 9. Check Docker build logs (if any) ==="
docker ps -a --filter "ancestor=gitea.harkon.co.uk/harkon/base-ml" --format "{{.ID}} {{.Status}}"

echo ""
echo "=== 10. Try a simple docker login test ==="
echo "Testing registry connectivity..."
curl -v https://gitea.harkon.co.uk/v2/ 2>&1 | grep -E "HTTP|401|200"

ENDSSH
|
||||
|
||||
54
scripts/deploy-external.sh
Executable file
54
scripts/deploy-external.sh
Executable file
@@ -0,0 +1,54 @@
|
||||
#!/bin/bash

# Deploy external services on production server
# Usage: ./scripts/deploy-external.sh <service>

set -e

# First positional argument: service name, or "all".
SERVICE=$1

if [ -z "$SERVICE" ]; then
    echo "Usage: $0 <service>"
    echo ""
    echo "Available services:"
    echo " traefik"
    echo " authentik"
    echo " gitea"
    echo " nextcloud"
    echo " portainer"
    echo " all"
    exit 1
fi

# Resolve paths relative to this script so it can be run from any directory.
SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
PROJECT_ROOT="$(dirname "$SCRIPT_DIR")"
COMPOSE_DIR="$PROJECT_ROOT/infra/compose"
|
||||
|
||||
# Bring up one compose service directory with `docker compose up -d`.
# Arguments: $1 - service name (must be a subdirectory of $COMPOSE_DIR)
# Returns:   0 on success, 1 when the directory is missing or compose fails.
# The compose call runs in a subshell so the caller's working directory is
# not changed (the previous `cd` leaked the new cwd to subsequent commands).
deploy_service() {
    local svc=$1
    echo "🚀 Deploying $svc..."

    if [ ! -d "$COMPOSE_DIR/$svc" ]; then
        echo "❌ Service directory not found: $COMPOSE_DIR/$svc"
        return 1
    fi

    ( cd "$COMPOSE_DIR/$svc" && docker compose up -d ) || return 1
    echo "✅ $svc deployed"
}
|
||||
|
||||
# Deploy everything, Traefik first (it fronts the rest), pausing briefly so
# dependents come up behind a running proxy; otherwise deploy the one service.
if [ "$SERVICE" = "all" ]; then
    deploy_service "traefik"
    sleep 5
    deploy_service "authentik"
    sleep 5
    deploy_service "gitea"
    deploy_service "nextcloud"
    deploy_service "portainer"
else
    deploy_service "$SERVICE"
fi

echo ""
echo "🎉 Deployment complete!"
|
||||
313
scripts/deploy-to-production.sh
Normal file
313
scripts/deploy-to-production.sh
Normal file
@@ -0,0 +1,313 @@
|
||||
#!/bin/bash
# Deploy AI Tax Agent to Production Server
# Usage: ./scripts/deploy-to-production.sh [step]
# Steps: backup, prepare, infrastructure, services, monitoring, all

set -e

# Configuration
# Remote SSH target and deployment layout; local paths are relative to the
# repo root, so run this script from there.
REMOTE_HOST="deploy@141.136.35.199"
REMOTE_PATH="/opt/compose/ai-tax-agent"
LOCAL_COMPOSE_PATH="infra/compose/production"
ENV_FILE="infra/compose/.env.production"

# Colors for output
RED='\033[0;31m'
GREEN='\033[0;32m'
YELLOW='\033[1;33m'
BLUE='\033[0;34m'
NC='\033[0m' # No Color
|
||||
|
||||
# Helper functions
# Colored, emoji-tagged one-line loggers. Color variables are defined in the
# configuration section above; printf '%b' expands their backslash escapes,
# exactly as `echo -e` did.
log_info() {
    printf '%b\n' "${BLUE}ℹ️  $1${NC}"
}

log_success() {
    printf '%b\n' "${GREEN}✅ $1${NC}"
}

log_warning() {
    printf '%b\n' "${YELLOW}⚠️  $1${NC}"
}

log_error() {
    printf '%b\n' "${RED}❌ $1${NC}"
}
|
||||
|
||||
# Check prerequisites
# Verifies the production env file exists and contains no unfilled secrets,
# and that ssh is available. Exits the script (not just the function) on
# any failure so no deployment step can run with a bad environment.
check_prerequisites() {
    log_info "Checking prerequisites..."

    if [ ! -f "$ENV_FILE" ]; then
        log_error "Production environment file not found: $ENV_FILE"
        log_info "Run: ./scripts/generate-production-secrets.sh"
        exit 1
    fi

    # Refuse to deploy while placeholder secrets are still present.
    if grep -q "CHANGE_ME" "$ENV_FILE"; then
        log_error "Production environment file contains CHANGE_ME placeholders"
        log_info "Run: ./scripts/generate-production-secrets.sh"
        exit 1
    fi

    if ! command -v ssh &> /dev/null; then
        log_error "ssh command not found"
        exit 1
    fi

    log_success "Prerequisites check passed"
}
|
||||
|
||||
# Backup remote server
# Tars /opt/compose into ~/backups on the server (excluding the large ACME
# cert store and node_modules) and snapshots the running container/volume
# lists, so a failed deploy can be diagnosed or rolled back.
backup_remote() {
    log_info "Creating backup on remote server..."

    # Quoted 'EOF': everything below runs verbatim on the remote host,
    # so $(date ...) expands remotely at backup time.
    ssh $REMOTE_HOST << 'EOF'
set -e
mkdir -p ~/backups
cd /opt/compose

# Backup compose directory (exclude large cert files)
tar -czf ~/backups/backup-$(date +%Y%m%d-%H%M%S).tar.gz \
    --exclude='./traefik/certs/godaddy-acme.json' \
    --exclude='./*/node_modules' \
    .

# Document current state
docker ps > ~/backups/current-services-$(date +%Y%m%d-%H%M%S).txt
docker volume ls > ~/backups/current-volumes-$(date +%Y%m%d-%H%M%S).txt

echo "Backup created in ~/backups/"
ls -lh ~/backups/ | tail -5
EOF

    log_success "Backup completed"
}
|
||||
|
||||
# Prepare remote server
# Creates the deployment directory tree on the server. The heredoc delimiter
# is unquoted, so $REMOTE_PATH expands locally before the commands are sent.
prepare_remote() {
    log_info "Preparing remote server directory structure..."

    ssh $REMOTE_HOST << EOF
set -e

# Create application directory
mkdir -p $REMOTE_PATH

# Create subdirectories for config files
mkdir -p $REMOTE_PATH/prometheus
mkdir -p $REMOTE_PATH/grafana/provisioning
mkdir -p $REMOTE_PATH/grafana/dashboards
mkdir -p $REMOTE_PATH/loki

echo "Directory structure created"
ls -la $REMOTE_PATH
EOF

    log_success "Remote server prepared"
}
|
||||
|
||||
# Copy files to remote server
# Pushes compose files, the production .env (contains secrets — transferred
# over SSH only) and monitoring configs to $REMOTE_PATH. Assumes
# prepare_remote already created the target subdirectories.
# All local variable expansions are quoted (SC2086 fix); the `*` globs on the
# local config directories are deliberately left unquoted so they expand.
copy_files() {
    log_info "Copying compose files to remote server..."

    # Copy compose files
    scp "$LOCAL_COMPOSE_PATH/infrastructure.yaml" "$REMOTE_HOST:$REMOTE_PATH/"
    scp "$LOCAL_COMPOSE_PATH/services.yaml" "$REMOTE_HOST:$REMOTE_PATH/"
    scp "$LOCAL_COMPOSE_PATH/monitoring.yaml" "$REMOTE_HOST:$REMOTE_PATH/"

    # Copy environment file
    scp "$ENV_FILE" "$REMOTE_HOST:$REMOTE_PATH/.env"

    # Copy configuration files
    scp -r infra/compose/prometheus/* "$REMOTE_HOST:$REMOTE_PATH/prometheus/"
    scp -r infra/compose/grafana/provisioning/* "$REMOTE_HOST:$REMOTE_PATH/grafana/provisioning/"
    scp -r infra/compose/grafana/dashboards/* "$REMOTE_HOST:$REMOTE_PATH/grafana/dashboards/"
    scp -r infra/compose/loki/* "$REMOTE_HOST:$REMOTE_PATH/loki/"

    log_success "Files copied to remote server"
}
|
||||
|
||||
# Deploy infrastructure
# Starts the infrastructure compose stack on the remote host and prints its
# status. Unquoted heredoc delimiter: $REMOTE_PATH expands locally.
# NOTE(review): the fixed 30s sleep is a heuristic, not a real health check.
deploy_infrastructure() {
    log_info "Deploying infrastructure services..."

    ssh $REMOTE_HOST << EOF
set -e
cd $REMOTE_PATH

echo "Starting infrastructure services..."
docker compose -f infrastructure.yaml up -d

echo "Waiting for services to be healthy..."
sleep 30

echo "Infrastructure services status:"
docker compose -f infrastructure.yaml ps
EOF

    log_success "Infrastructure deployed"
}
|
||||
|
||||
# Deploy services
# Pulls the latest application images (best-effort: `|| true` keeps going if
# the registry is unreachable and cached images exist) and starts the
# application compose stack. $REMOTE_PATH expands locally (unquoted EOF).
deploy_services() {
    log_info "Deploying application services..."

    ssh $REMOTE_HOST << EOF
set -e
cd $REMOTE_PATH

echo "Pulling latest images..."
docker compose -f services.yaml pull || true

echo "Starting application services..."
docker compose -f services.yaml up -d

echo "Waiting for services to start..."
sleep 20

echo "Application services status:"
docker compose -f services.yaml ps
EOF

    log_success "Application services deployed"
}
|
||||
|
||||
# Deploy monitoring
# Starts the monitoring compose stack (Prometheus/Grafana/Loki) on the
# remote host. $REMOTE_PATH expands locally (unquoted EOF delimiter).
deploy_monitoring() {
    log_info "Deploying monitoring stack..."

    ssh $REMOTE_HOST << EOF
set -e
cd $REMOTE_PATH

echo "Starting monitoring services..."
docker compose -f monitoring.yaml up -d

echo "Waiting for services to start..."
sleep 15

echo "Monitoring services status:"
docker compose -f monitoring.yaml ps
EOF

    log_success "Monitoring stack deployed"
}
|
||||
|
||||
# Verify deployment
# Read-only status report: lists all three compose stacks, the shared Docker
# networks and disk usage on the remote host. Makes no changes.
verify_deployment() {
    log_info "Verifying deployment..."

    ssh $REMOTE_HOST << EOF
set -e
cd $REMOTE_PATH

echo "=== Infrastructure Services ==="
docker compose -f infrastructure.yaml ps

echo ""
echo "=== Application Services ==="
docker compose -f services.yaml ps

echo ""
echo "=== Monitoring Services ==="
docker compose -f monitoring.yaml ps

echo ""
echo "=== Docker Networks ==="
docker network ls | grep -E "frontend|backend"

echo ""
echo "=== Disk Usage ==="
df -h | grep -E "Filesystem|/dev/sda"
EOF

    log_success "Deployment verification completed"
}
|
||||
|
||||
# Show logs
# Tail the last 50 log lines of one application service on the remote host.
# Arguments: $1 - compose service name. It is interpolated into the remote
# command via the unquoted heredoc — callers must pass trusted values only.
show_logs() {
    local service=$1
    log_info "Showing logs for $service..."

    ssh $REMOTE_HOST << EOF
cd $REMOTE_PATH
docker compose -f services.yaml logs --tail=50 $service
EOF
}
|
||||
|
||||
# Main deployment flow
# Full pipeline: prerequisites -> backup -> prepare/copy -> infrastructure,
# then an interactive confirmation gate before services + monitoring, and a
# final status verification. Requires a TTY for the read prompt.
deploy_all() {
    log_info "Starting full deployment to production..."

    check_prerequisites
    backup_remote
    prepare_remote
    copy_files
    deploy_infrastructure

    # Manual gate: let the operator confirm infrastructure health first.
    log_warning "Infrastructure deployed. Please verify services are healthy before continuing."
    read -p "Continue with application deployment? (y/n) " -n 1 -r
    echo
    if [[ ! $REPLY =~ ^[Yy]$ ]]; then
        log_warning "Deployment paused. Run './scripts/deploy-to-production.sh services' to continue."
        exit 0
    fi

    deploy_services
    deploy_monitoring
    verify_deployment

    log_success "🎉 Deployment completed successfully!"
    log_info "Access your services at:"
    echo " - Application: https://app.harkon.co.uk"
    echo " - API: https://api.harkon.co.uk"
    echo " - Grafana: https://grafana.harkon.co.uk"
    echo " - Vault: https://vault.harkon.co.uk"
}
|
||||
|
||||
# Parse command line arguments
# Single-step dispatcher; with no argument the full pipeline ("all") runs.
case "${1:-all}" in
    backup)
        backup_remote
        ;;
    prepare)
        check_prerequisites
        prepare_remote
        copy_files
        ;;
    infrastructure)
        deploy_infrastructure
        ;;
    services)
        deploy_services
        ;;
    monitoring)
        deploy_monitoring
        ;;
    verify)
        verify_deployment
        ;;
    logs)
        # Optional second argument selects the service; defaults to svc-ingestion.
        show_logs "${2:-svc-ingestion}"
        ;;
    all)
        deploy_all
        ;;
    *)
        echo "Usage: $0 {backup|prepare|infrastructure|services|monitoring|verify|logs|all}"
        echo ""
        echo "Steps:"
        echo " backup - Create backup of remote server"
        echo " prepare - Prepare remote server and copy files"
        echo " infrastructure - Deploy infrastructure services"
        echo " services - Deploy application services"
        echo " monitoring - Deploy monitoring stack"
        echo " verify - Verify deployment status"
        echo " logs [service] - Show logs for a service"
        echo " all - Run full deployment (default)"
        exit 1
        ;;
esac
|
||||
|
||||
101
scripts/deploy.sh
Executable file
101
scripts/deploy.sh
Executable file
@@ -0,0 +1,101 @@
|
||||
#!/bin/bash

# Comprehensive Deployment Script with Fixes
# Handles the complete deployment process with all discovered fixes.
#
# Run from the repository root. Creates networks, generates dev certs,
# brings up infrastructure and Authentik in dependency order, waits for
# Authentik to report healthy, then starts the application services.

set -euo pipefail

COMPOSE_FILE="infra/compose/docker-compose.local.yml"

# Run docker compose against the local compose file from the repo root.
# Compose resolves relative paths against the compose file's directory,
# so the previous 'cd infra/compose … cd ../..' dance is unnecessary.
compose() {
  docker compose -f "$COMPOSE_FILE" "$@"
}

echo "🚀 Starting comprehensive deployment with fixes..."

# Step 1: Create networks
echo "🌐 Creating Docker networks..."
./scripts/create-networks.sh

# Step 2: Generate certificates
echo "🔐 Generating development certificates..."
./scripts/generate-dev-certs.sh

# Step 3: Start core infrastructure first
echo "🏗️ Starting core infrastructure..."
compose up -d traefik postgres redis

# Step 4: Wait for core services and fix database issues
echo "⏳ Waiting for core services..."
sleep 15
./scripts/fix-database-issues.sh

# Step 5: Start Authentik components in order (db/redis before server,
# server before worker/outpost) — the sleeps give each tier time to settle.
echo "🔐 Starting Authentik components..."
compose up -d authentik-db authentik-redis
sleep 10
compose up -d authentik-server
sleep 15
compose up -d authentik-worker authentik-outpost

# Step 6: Start remaining infrastructure
echo "🏗️ Starting remaining infrastructure..."
compose up -d vault neo4j qdrant minio prometheus grafana loki

# Step 7: Wait until Docker's healthcheck reports authentik-server healthy,
# giving up (with recent logs) after $timeout seconds.
echo "⏳ Waiting for Authentik to be healthy..."
timeout=120
counter=0
while [ "$(docker inspect --format='{{.State.Health.Status}}' authentik-server 2>/dev/null)" != "healthy" ]; do
  if [ "$counter" -ge "$timeout" ]; then
    echo "❌ Authentik server failed to become healthy within $timeout seconds"
    echo "📋 Checking logs..."
    compose logs --tail=10 authentik-server
    exit 1
  fi
  sleep 2
  counter=$((counter + 2))
  echo "⏳ Waiting for Authentik... ($counter/$timeout seconds)"
done
echo "✅ Authentik is healthy"

# Step 8: Start application services
echo "🚀 Starting application services..."
compose up -d \
  svc-ingestion svc-extract svc-forms svc-hmrc svc-kg \
  svc-normalize-map svc-ocr svc-rag-indexer svc-rag-retriever \
  svc-reason svc-rpa svc-firm-connectors svc-coverage ui-review

# Step 9: Start Unleash (best-effort: it may need a manually provisioned token)
echo "📊 Starting Unleash (may require manual configuration)..."
compose up -d unleash || echo "⚠️ Unleash failed to start - may need manual token configuration"

# Step 10: Final verification (best-effort; a non-zero result is reported, not fatal)
echo "🔍 Running final verification..."
sleep 10
./scripts/verify-infra.sh || echo "⚠️ Some services may need additional configuration"

echo ""
echo "🎉 Deployment complete!"
echo ""
echo "📋 Next steps:"
echo " 1. Complete Authentik setup: https://auth.local/if/flow/initial-setup/"
echo " 2. Configure applications in Authentik admin panel"
echo " 3. Test protected services redirect to Authentik"
echo ""
echo "🌐 Available endpoints:"
echo " • Traefik Dashboard: http://localhost:8080"
echo " • Authentik: https://auth.local"
echo " • Grafana: https://grafana.local"
echo " • Review UI: https://review.local (requires Authentik setup)"
echo ""
echo "🔧 Troubleshooting:"
echo " • Check logs: make logs"
echo " • Check status: make status"
echo " • Restart services: make restart"
|
||||
98
scripts/dev-up.sh
Executable file
98
scripts/dev-up.sh
Executable file
@@ -0,0 +1,98 @@
|
||||
#!/usr/bin/env bash
set -euo pipefail

# dev-up.sh — bring the full local dev environment up:
# Docker networks, dev TLS certs, core infrastructure, Authentik
# (with readiness polling), and finally the application services.

ROOT_DIR=$(cd -- "$(dirname -- "${BASH_SOURCE[0]}")/.." && pwd)
COMPOSE_DIR="$ROOT_DIR/infra/compose"

echo "🚀 Dev up: networks, certs, infra, services"

# 1) Ensure .env exists
if [[ ! -f "$COMPOSE_DIR/.env" ]]; then
  cp "$COMPOSE_DIR/env.example" "$COMPOSE_DIR/.env"
  echo "📝 Created .env from template"
fi

# 2) Read only needed values from .env (do not 'source' due to spaces)
# Look up a single key in $COMPOSE_DIR/.env.
# Arguments: $1 - key name; $2 - default used when the key is absent (optional)
# Outputs:   the last matching value (text after the first '='), or the default
get_env() {
  local key="$1"; local def="${2-}"
  local line
  line=$(grep -E "^${key}=" "$COMPOSE_DIR/.env" | tail -n1 || true)
  if [[ -z "$line" ]]; then printf "%s" "$def"; return; fi
  printf "%s" "${line#*=}"
}

# Environment variables win over .env values; .env values win over defaults.
DOMAIN=${DOMAIN:-$(get_env DOMAIN local)}
AUTHENTIK_BOOTSTRAP_TOKEN=${AUTHENTIK_BOOTSTRAP_TOKEN:-$(get_env AUTHENTIK_BOOTSTRAP_TOKEN "")}
AUTHENTIK_OUTPOST_TOKEN=${AUTHENTIK_OUTPOST_TOKEN:-$(get_env AUTHENTIK_OUTPOST_TOKEN "")}
START_APP_SERVICES=${START_APP_SERVICES:-$(get_env START_APP_SERVICES true)}

# 3) Networks and certs
bash "$ROOT_DIR/scripts/create-networks.sh"
bash "$ROOT_DIR/scripts/generate-dev-certs.sh"

# 4) Bring up core infra (detached)
echo "🏗️ Starting Traefik + core infra..."
docker compose -f "$COMPOSE_DIR/docker-compose.local.yml" up -d \
  traefik authentik-db authentik-redis authentik-server authentik-worker \
  vault postgres neo4j qdrant minio redis prometheus grafana loki

# 5) Wait for Traefik, then Authentik (initial-setup or login)
# Poll Traefik's ping endpoint for up to ~120s (60 × 2s).
echo "⏳ Waiting for Traefik to respond..."
for i in {1..60}; do
  code=$(curl -s -o /dev/null -w '%{http_code}' http://localhost:8080/ping || true)
  if [[ "$code" == "200" ]]; then echo "✅ Traefik reachable"; break; fi
  sleep 2
  if [[ "$i" == 60 ]]; then echo "❌ Traefik not ready"; exit 1; fi
done

echo "⏳ Waiting for Authentik to respond..."
AUTH_HOST="auth.${DOMAIN}"
# --resolve pins auth.<domain> to localhost so no DNS/hosts entry is needed;
# -k is required because the dev cert is self-signed.
RESOLVE=(--resolve "${AUTH_HOST}:443:127.0.0.1")
for i in {1..60}; do
  code_setup=$(curl -ks "${RESOLVE[@]}" -o /dev/null -w '%{http_code}' "https://${AUTH_HOST}/if/flow/initial-setup/" || true)
  code_login=$(curl -ks "${RESOLVE[@]}" -o /dev/null -w '%{http_code}' "https://${AUTH_HOST}/if/flow/default-authentication-flow/" || true)
  code_root=$(curl -ks "${RESOLVE[@]}" -o /dev/null -w '%{http_code}' "https://${AUTH_HOST}/" || true)
  # If initial-setup returns 404 but login/root are healthy, treat as ready (already initialized)
  if [[ "$code_setup" == "404" ]]; then
    if [[ "$code_login" =~ ^(200|302|401)$ || "$code_root" =~ ^(200|302|401)$ ]]; then
      echo "✅ Authentik reachable (initial setup not present)"; break
    fi
  fi
  # If any key flow says OK, proceed
  # (200/302/401 all mean the server is up and answering; 401 is just "auth required")
  if [[ "$code_setup" =~ ^(200|302|401)$ || "$code_login" =~ ^(200|302|401)$ || "$code_root" =~ ^(200|302|401)$ ]]; then
    echo "✅ Authentik reachable"; break
  fi
  sleep 5
  if [[ "$i" == 60 ]]; then echo "❌ Authentik not ready"; exit 1; fi
done

# 6) Setup Authentik (optional automated)
# Best-effort: setup-authentik.sh failures are tolerated (|| true).
if [[ -n "${AUTHENTIK_BOOTSTRAP_TOKEN:-}" ]]; then
  echo "🔧 Running Authentik setup with bootstrap token..."
  AUTHENTIK_API_TOKEN="$AUTHENTIK_BOOTSTRAP_TOKEN" DOMAIN="$DOMAIN" bash "$ROOT_DIR/scripts/setup-authentik.sh" || true
else
  echo "ℹ️ No AUTHENTIK_BOOTSTRAP_TOKEN provided; skipping automated Authentik API setup"
fi

# 7) Start Authentik outpost if token present
# 'changeme' is treated as "not configured" — presumably the env.example default.
if [[ -n "${AUTHENTIK_OUTPOST_TOKEN:-}" && "${AUTHENTIK_OUTPOST_TOKEN}" != "changeme" ]]; then
  echo "🔐 Starting Authentik outpost..."
  docker compose -f "$COMPOSE_DIR/docker-compose.local.yml" up -d authentik-outpost || true
else
  echo "ℹ️ Set AUTHENTIK_OUTPOST_TOKEN in $COMPOSE_DIR/.env to start authentik-outpost"
fi

# 8) Start application services (optional)
if [[ "${START_APP_SERVICES:-true}" == "true" ]]; then
  echo "🚀 Starting application services..."
  docker compose -f "$COMPOSE_DIR/docker-compose.local.yml" up -d \
    svc-ingestion svc-extract svc-kg svc-rag-retriever svc-coverage \
    svc-firm-connectors svc-forms svc-hmrc svc-normalize-map svc-ocr \
    svc-rag-indexer svc-reason svc-rpa ui-review unleash || true
fi

echo "🎉 Dev environment is up"
echo "🔗 Traefik dashboard: http://localhost:8080"
echo "🔐 Authentik: https://auth.${DOMAIN}"
echo "📊 Grafana: https://grafana.${DOMAIN}"
echo "📝 Review UI: https://review.${DOMAIN}"
|
||||
152
scripts/enable-gitea-registry.sh
Executable file
152
scripts/enable-gitea-registry.sh
Executable file
@@ -0,0 +1,152 @@
|
||||
#!/bin/bash
# Enable Gitea Container Registry
# This script configures Gitea to support Docker container registry:
# enables the packages feature, rewrites the Gitea compose file with
# Traefik routes for gitea/registry subdomains, and restarts Gitea.
# All remote work runs over SSH on $REMOTE_HOST.

set -euo pipefail

REMOTE_HOST="deploy@141.136.35.199"
# NOTE(review): GITEA_PATH is currently unused — the remote paths below are
# hardcoded; keep them in sync or switch them to use this variable.
GITEA_PATH="/opt/compose/gitea"

echo "🔧 Enabling Gitea Container Registry..."

# Step 1: Add packages configuration to Gitea
echo "📝 Step 1: Configuring Gitea packages..."

# Quoted 'EOF' delimiter: the script body is sent verbatim to the remote shell.
ssh "$REMOTE_HOST" << 'EOF'
# Create custom configuration directory if it doesn't exist
sudo mkdir -p /opt/compose/gitea/custom/conf

# Create or update custom app.ini with packages enabled
sudo tee /opt/compose/gitea/custom/conf/app.ini > /dev/null << 'GITEA_CONFIG'
[packages]
ENABLED = true
CHUNKED_UPLOAD_PATH = /data/gitea/tmp/package-upload

[packages.container]
ENABLED = true
GITEA_CONFIG

echo "✅ Gitea configuration created"
EOF

# Step 2: Update Gitea compose file to mount custom config and add registry labels
echo "📝 Step 2: Updating Gitea compose file..."

ssh "$REMOTE_HOST" << 'EOF'
cd /opt/compose/gitea

# Backup current compose file
sudo cp compose.yaml compose.yaml.backup

# Create updated compose file with registry support
sudo tee compose.yaml > /dev/null << 'COMPOSE_FILE'
---
services:
  server:
    image: docker.io/gitea/gitea:1.24.5
    container_name: gitea-server
    env_file:
      - ./.env
    environment:
      - USER_UID=1000
      - USER_GID=1000
      - GITEA__database__DB_TYPE=postgres
      - GITEA__database__HOST=${POSTGRES_HOST:-db}:${POSTGRES_PORT:-5432}
      - GITEA__database__NAME=${POSTGRES_DB:-gitea}
      - GITEA__database__USER=${POSTGRES_USER:-gitea}
      - GITEA__database__PASSWD=${POSTGRES_PASSWORD:?POSTGRES_PASSWORD not set}
      - GITEA__server__SSH_PORT=2221
      - GITEA__server__ROOT_URL=https://gitea.harkon.co.uk
      - GITEA__packages__ENABLED=true
      - GITEA__packages__CHUNKED_UPLOAD_PATH=/data/gitea/tmp/package-upload
    networks:
      - frontend
      - backend
    volumes:
      - gitea-data:/data
      - ./custom/conf/app.ini:/data/gitea/conf/app.ini.custom:ro
      - /etc/timezone:/etc/timezone:ro
      - /etc/localtime:/etc/localtime:ro
    ports:
      - "2221:22"
    depends_on:
      - db
    labels:
      # Main Gitea web interface
      - traefik.enable=true
      - traefik.http.services.gitea.loadbalancer.server.port=3000
      - traefik.http.services.gitea.loadbalancer.server.scheme=http
      - traefik.http.routers.gitea-https.entrypoints=websecure
      - traefik.http.routers.gitea-https.rule=Host(`gitea.harkon.co.uk`)
      - traefik.http.routers.gitea-https.tls=true
      - traefik.http.routers.gitea-https.tls.certresolver=godaddy
      - traefik.http.routers.gitea-https.service=gitea
      # Container Registry (same port, different subdomain)
      - traefik.http.routers.gitea-registry.entrypoints=websecure
      - traefik.http.routers.gitea-registry.rule=Host(`registry.harkon.co.uk`)
      - traefik.http.routers.gitea-registry.tls=true
      - traefik.http.routers.gitea-registry.tls.certresolver=godaddy
      - traefik.http.routers.gitea-registry.service=gitea
    restart: unless-stopped

  db:
    image: docker.io/library/postgres:17.5
    container_name: gitea-db
    environment:
      - POSTGRES_USER=${POSTGRES_USER:-gitea}
      - POSTGRES_PASSWORD=${POSTGRES_PASSWORD:?POSTGRES_PASSWORD not set}
      - POSTGRES_DB=${POSTGRES_DB:-gitea}
    networks:
      - backend
    volumes:
      - gitea-db:/var/lib/postgresql/data
    restart: unless-stopped

volumes:
  gitea-data:
    driver: local
  gitea-db:
    driver: local

networks:
  frontend:
    external: true
  backend:
    external: true
COMPOSE_FILE

echo "✅ Gitea compose file updated"
EOF

# Step 3: Restart Gitea to apply changes
echo "📝 Step 3: Restarting Gitea..."

ssh "$REMOTE_HOST" << 'EOF'
cd /opt/compose/gitea
docker compose down
docker compose up -d

echo "⏳ Waiting for Gitea to start..."
sleep 15

echo "✅ Gitea restarted"
EOF

echo ""
echo "✅ Gitea Container Registry enabled successfully!"
echo ""
echo "📋 Next steps:"
echo "1. Verify DNS: dig registry.harkon.co.uk (should point to 141.136.35.199)"
echo "2. Wait for SSL certificate (Traefik will auto-generate)"
echo "3. Create Gitea access token:"
echo " - Login to https://gitea.harkon.co.uk"
echo " - Settings → Applications → Generate New Token"
echo " - Select scope: write:package"
echo "4. Login to registry:"
echo " docker login registry.harkon.co.uk"
echo " Username: <your-gitea-username>"
echo " Password: <your-access-token>"
echo ""
echo "🔍 Check Gitea logs:"
echo " ssh deploy@141.136.35.199 'docker logs gitea-server'"
|
||||
|
||||
36
scripts/fix-database-issues.sh
Executable file
36
scripts/fix-database-issues.sh
Executable file
@@ -0,0 +1,36 @@
|
||||
#!/bin/bash

# Fix Database Issues Script
# Handles common database setup issues discovered during deployment:
# waits for the 'postgres' container to accept connections, then creates
# the 'unleash' and 'tax_system' databases if they are missing.

set -euo pipefail

echo "🔧 Fixing database issues..."

# Wait for PostgreSQL to be ready, giving up after $timeout seconds.
echo "⏳ Waiting for PostgreSQL to be ready..."
timeout=60
counter=0
while ! docker exec postgres pg_isready -U postgres >/dev/null 2>&1; do
  if [ "$counter" -ge "$timeout" ]; then
    echo "❌ PostgreSQL failed to start within $timeout seconds"
    exit 1
  fi
  sleep 1
  counter=$((counter + 1))
done
echo "✅ PostgreSQL is ready"

# Create a database inside the 'postgres' container if it does not exist yet.
# Arguments: $1 - database name (trusted, script-internal values only)
ensure_database() {
  local db="$1"
  docker exec postgres psql -U postgres -tc "SELECT 1 FROM pg_database WHERE datname = '${db}'" | grep -q 1 ||
    docker exec postgres psql -U postgres -c "CREATE DATABASE ${db};"
}

# Create unleash database if it doesn't exist
echo "📊 Creating unleash database if needed..."
ensure_database unleash
echo "✅ Unleash database ready"

# Create tax_system database for Authentik if needed
echo "🔐 Creating tax_system database for Authentik if needed..."
ensure_database tax_system
echo "✅ Authentik database ready"

echo "🎉 Database issues fixed!"
|
||||
152
scripts/fix-gitea-upload-limit.sh
Executable file
152
scripts/fix-gitea-upload-limit.sh
Executable file
@@ -0,0 +1,152 @@
|
||||
#!/bin/bash

# Script to fix Gitea upload size limits for large Docker images
# Run this on the remote server: ssh deploy@141.136.35.199
#
# It creates a Traefik buffering middleware allowing 5GB request bodies,
# raises Gitea's LFS limit, enables the packages feature, restarts both
# services, and probes the registry endpoint.

set -e

echo "=== Gitea Registry Upload Limit Fix ==="
echo ""

# Colors
RED='\033[0;31m'
GREEN='\033[0;32m'
YELLOW='\033[1;33m'
NC='\033[0m' # No Color

# Step 1: Check if Gitea is running
echo -e "${YELLOW}Step 1: Checking Gitea status...${NC}"
if docker ps | grep -q gitea-server; then
  echo -e "${GREEN}✓ Gitea is running${NC}"
  # First container whose name matches 'gitea' (assumed to be the server).
  GITEA_CONTAINER=$(docker ps --filter "name=gitea" --format "{{.Names}}" | head -1)
  echo " Container: $GITEA_CONTAINER"
else
  echo -e "${RED}✗ Gitea is not running!${NC}"
  exit 1
fi

# Step 2: Check if Traefik is running
echo -e "\n${YELLOW}Step 2: Checking Traefik status...${NC}"
if docker ps | grep -q traefik; then
  echo -e "${GREEN}✓ Traefik is running${NC}"
  TRAEFIK_CONTAINER=$(docker ps --filter "name=traefik" --format "{{.Names}}" | head -1)
  echo " Container: $TRAEFIK_CONTAINER"
  HAS_TRAEFIK=true
else
  echo -e "${YELLOW}⚠ Traefik is not running (may not be needed)${NC}"
  HAS_TRAEFIK=false
fi

# Step 3: Find Traefik config directory
if [ "$HAS_TRAEFIK" = true ]; then
  echo -e "\n${YELLOW}Step 3: Finding Traefik configuration...${NC}"

  # Try to find Traefik config mount
  # NOTE(review): this parses 'docker inspect' text with grep/cut and depends
  # on the JSON field ordering — fragile; '--format' would be more robust.
  TRAEFIK_CONFIG=$(docker inspect $TRAEFIK_CONTAINER | grep -A 1 '"Destination": "/etc/traefik"' | grep Source | cut -d'"' -f4 || echo "")

  if [ -z "$TRAEFIK_CONFIG" ]; then
    TRAEFIK_CONFIG="/opt/traefik/config"
    echo -e "${YELLOW} Using default: $TRAEFIK_CONFIG${NC}"
  else
    echo -e "${GREEN} Found: $TRAEFIK_CONFIG${NC}"
  fi

  # Create config directory if it doesn't exist
  sudo mkdir -p "$TRAEFIK_CONFIG"

  # Step 4: Create Traefik middleware for large uploads
  # Assumes Traefik's file provider watches this directory — TODO confirm.
  echo -e "\n${YELLOW}Step 4: Creating Traefik middleware...${NC}"

  sudo tee "$TRAEFIK_CONFIG/gitea-large-upload.yml" > /dev/null << 'EOF'
http:
  middlewares:
    gitea-large-upload:
      buffering:
        maxRequestBodyBytes: 5368709120 # 5GB
        memRequestBodyBytes: 104857600 # 100MB in memory
        maxResponseBodyBytes: 5368709120 # 5GB
        memResponseBodyBytes: 104857600 # 100MB in memory
        retryExpression: "IsNetworkError() && Attempts() < 3"
EOF

  echo -e "${GREEN}✓ Created $TRAEFIK_CONFIG/gitea-large-upload.yml${NC}"

  # Step 5: Restart Traefik
  echo -e "\n${YELLOW}Step 5: Restarting Traefik...${NC}"
  docker restart $TRAEFIK_CONTAINER
  sleep 3
  echo -e "${GREEN}✓ Traefik restarted${NC}"
fi

# Step 6: Update Gitea configuration
echo -e "\n${YELLOW}Step 6: Updating Gitea configuration...${NC}"

# Backup current config
docker exec $GITEA_CONTAINER cp /data/gitea/conf/app.ini /data/gitea/conf/app.ini.backup
echo -e "${GREEN}✓ Backed up app.ini${NC}"

# Check if settings already exist (idempotent: skip if already present)
if docker exec $GITEA_CONTAINER grep -q "LFS_MAX_FILE_SIZE" /data/gitea/conf/app.ini; then
  echo -e "${YELLOW} LFS_MAX_FILE_SIZE already configured${NC}"
else
  # Add LFS_MAX_FILE_SIZE to [server] section
  # NOTE(review): this appends to the END of app.ini, i.e. to whatever section
  # happens to be last — not necessarily [server]; verify section placement.
  docker exec $GITEA_CONTAINER sh -c 'echo "LFS_MAX_FILE_SIZE = 5368709120" >> /data/gitea/conf/app.ini'
  echo -e "${GREEN}✓ Added LFS_MAX_FILE_SIZE${NC}"
fi

# Check if packages section exists
if docker exec $GITEA_CONTAINER grep -q "\[packages\]" /data/gitea/conf/app.ini; then
  echo -e "${YELLOW} [packages] section already exists${NC}"
else
  # Add packages section
  docker exec $GITEA_CONTAINER sh -c 'cat >> /data/gitea/conf/app.ini << EOF

[packages]
ENABLED = true
CHUNKED_UPLOAD_PATH = /data/gitea/tmp/package-upload
EOF'
  echo -e "${GREEN}✓ Added [packages] section${NC}"
fi

# Step 7: Restart Gitea
echo -e "\n${YELLOW}Step 7: Restarting Gitea...${NC}"
docker restart $GITEA_CONTAINER
sleep 5
echo -e "${GREEN}✓ Gitea restarted${NC}"

# Step 8: Test registry endpoint
# 401 from /v2/ is expected for an unauthenticated Docker registry probe.
echo -e "\n${YELLOW}Step 8: Testing registry endpoint...${NC}"
RESPONSE=$(curl -s -o /dev/null -w "%{http_code}" https://gitea.harkon.co.uk/v2/)

if [ "$RESPONSE" = "401" ] || [ "$RESPONSE" = "200" ]; then
  echo -e "${GREEN}✓ Registry is accessible (HTTP $RESPONSE)${NC}"
else
  echo -e "${RED}✗ Registry returned HTTP $RESPONSE${NC}"
fi

# Step 9: Summary
echo -e "\n${GREEN}=== Configuration Complete ===${NC}"
echo ""
echo "Next steps:"
echo "1. Log in to Gitea registry:"
echo " docker login gitea.harkon.co.uk"
echo ""
echo "2. Test with a small image:"
echo " docker pull alpine:latest"
echo " docker tag alpine:latest gitea.harkon.co.uk/harkon/test:latest"
echo " docker push gitea.harkon.co.uk/harkon/test:latest"
echo ""
echo "3. If successful, build and push base-ml:"
echo " cd /home/deploy/ai-tax-agent"
echo " docker build -f infra/docker/base-ml.Dockerfile -t gitea.harkon.co.uk/harkon/base-ml:v1.0.1 ."
echo " docker push gitea.harkon.co.uk/harkon/base-ml:v1.0.1"
echo ""

# Manual follow-up: the middleware is only effective once attached to the router.
if [ "$HAS_TRAEFIK" = true ]; then
  echo -e "${YELLOW}⚠ IMPORTANT: You need to add this label to your Gitea container:${NC}"
  echo " traefik.http.routers.gitea.middlewares=gitea-large-upload@file"
  echo ""
  echo " Add it to your Gitea docker-compose.yml and restart:"
  echo " docker-compose up -d gitea"
fi
|
||||
|
||||
28
scripts/generate-dev-certs.sh
Executable file
28
scripts/generate-dev-certs.sh
Executable file
@@ -0,0 +1,28 @@
|
||||
#!/usr/bin/env bash
set -euo pipefail

# Create a self-signed TLS certificate for local development domains.
# Writes infra/compose/traefik/certs/local.crt and local.key; does nothing
# if both files already exist.

readonly cert_dir="infra/compose/traefik/certs"
readonly crt_file="$cert_dir/local.crt"
readonly key_file="$cert_dir/local.key"

mkdir -p "$cert_dir"

# Idempotent: keep an existing cert/key pair untouched.
if [[ -f "$crt_file" && -f "$key_file" ]]; then
  echo "✅ Dev TLS certificate already exists at $cert_dir"
  exit 0
fi

echo "🔐 Generating self-signed TLS certificate for local domains..."

# Subject Alternative Names covering every local-dev hostname.
san_entries=(
  "DNS:localhost"
  "IP:127.0.0.1"
  "DNS:*.local.lan"
  "DNS:auth.local.lan"
  "DNS:grafana.local.lan"
  "DNS:review.local.lan"
  "DNS:api.local.lan"
  "DNS:vault.local.lan"
  "DNS:minio.local.lan"
  "DNS:minio-api.local.lan"
  "DNS:qdrant.local.lan"
  "DNS:neo4j.local.lan"
  "DNS:prometheus.local.lan"
  "DNS:loki.local.lan"
  "DNS:unleash.local.lan"
  "DNS:traefik.local.lan"
)
san=$(IFS=','; printf '%s' "${san_entries[*]}")

# 10-year self-signed RSA-2048 cert; openssl chatter is suppressed.
openssl req -x509 -nodes -newkey rsa:2048 -sha256 -days 3650 \
  -subj "/CN=local" \
  -keyout "$key_file" \
  -out "$crt_file" \
  -addext "subjectAltName=$san" >/dev/null 2>&1

echo "✅ Generated $crt_file and $key_file"
|
||||
82
scripts/generate-production-secrets.sh
Executable file
82
scripts/generate-production-secrets.sh
Executable file
@@ -0,0 +1,82 @@
|
||||
#!/bin/bash
# Generate strong secrets for production environment.
#
# Replaces every CHANGE_ME_* placeholder in infra/compose/.env.production
# with a freshly generated secret and prints the human-facing credentials.

set -euo pipefail

ENV_FILE="infra/compose/.env.production"

if [ ! -f "$ENV_FILE" ]; then
  echo "❌ Error: $ENV_FILE not found"
  exit 1
fi

echo "🔐 Generating strong secrets for production..."

# Generate a strong 32-char password (alphanumeric only, no special chars,
# so values are safe inside env files, URLs and the perl replacement below).
generate_password() {
  openssl rand -base64 32 | tr -d "=+/\n" | cut -c1-32
}

# Generate a 64-char hex token.
generate_hex_token() {
  openssl rand -hex 32
}

# Generate all secrets
POSTGRES_PASSWORD=$(generate_password)
NEO4J_PASSWORD=$(generate_password)
AUTHENTIK_DB_PASSWORD=$(generate_password)
MINIO_ROOT_PASSWORD=$(generate_password)
MINIO_SECRET_KEY=$(generate_password)
VAULT_ROOT_TOKEN=$(generate_hex_token)
AUTHENTIK_SECRET_KEY=$(generate_password)
AUTHENTIK_OUTPOST_TOKEN=$(generate_hex_token)
ADMIN_PASSWORD=$(generate_password)
GRAFANA_PASSWORD=$(generate_password)
GRAFANA_OAUTH_SECRET=$(generate_password)
API_CLIENT_SECRET=$(generate_password)
UI_REVIEW_CLIENT_SECRET=$(generate_password)
GRAFANA_CLIENT_SECRET=$(generate_password)
MINIO_CLIENT_SECRET=$(generate_password)
VAULT_CLIENT_SECRET=$(generate_password)
NEXTAUTH_SECRET=$(generate_password)

# Create a backup so a bad run can be rolled back
cp "$ENV_FILE" "$ENV_FILE.backup"

# Replace one placeholder with its generated secret, in place.
# perl -i is used for reliable cross-platform (macOS/Linux) in-place editing;
# \Q...\E quotes the placeholder so it is matched literally.
# Arguments: $1 - placeholder text; $2 - replacement secret
replace_placeholder() {
  local placeholder="$1" secret="$2"
  perl -i -pe "s/\Q${placeholder}\E/${secret}/g" "$ENV_FILE"
}

replace_placeholder CHANGE_ME_STRONG_PASSWORD_1 "$POSTGRES_PASSWORD"
replace_placeholder CHANGE_ME_STRONG_PASSWORD_2 "$NEO4J_PASSWORD"
replace_placeholder CHANGE_ME_STRONG_PASSWORD_3 "$AUTHENTIK_DB_PASSWORD"
replace_placeholder CHANGE_ME_STRONG_PASSWORD_4 "$MINIO_ROOT_PASSWORD"
replace_placeholder CHANGE_ME_STRONG_PASSWORD_5 "$MINIO_SECRET_KEY"
replace_placeholder CHANGE_ME_VAULT_ROOT_TOKEN "$VAULT_ROOT_TOKEN"
replace_placeholder CHANGE_ME_AUTHENTIK_SECRET_KEY "$AUTHENTIK_SECRET_KEY"
replace_placeholder CHANGE_ME_AUTHENTIK_OUTPOST_TOKEN "$AUTHENTIK_OUTPOST_TOKEN"
replace_placeholder CHANGE_ME_ADMIN_PASSWORD "$ADMIN_PASSWORD"
replace_placeholder CHANGE_ME_GRAFANA_PASSWORD "$GRAFANA_PASSWORD"
replace_placeholder CHANGE_ME_GRAFANA_OAUTH_SECRET "$GRAFANA_OAUTH_SECRET"
replace_placeholder CHANGE_ME_API_CLIENT_SECRET "$API_CLIENT_SECRET"
replace_placeholder CHANGE_ME_UI_REVIEW_CLIENT_SECRET "$UI_REVIEW_CLIENT_SECRET"
replace_placeholder CHANGE_ME_GRAFANA_CLIENT_SECRET "$GRAFANA_CLIENT_SECRET"
replace_placeholder CHANGE_ME_MINIO_CLIENT_SECRET "$MINIO_CLIENT_SECRET"
replace_placeholder CHANGE_ME_VAULT_CLIENT_SECRET "$VAULT_CLIENT_SECRET"
replace_placeholder CHANGE_ME_NEXTAUTH_SECRET "$NEXTAUTH_SECRET"

echo "✅ Secrets generated successfully!"
echo ""
echo "📝 Important credentials (save these securely!):"
echo "━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━"
echo "Admin Email: admin@harkon.co.uk"
echo "Admin Password: $ADMIN_PASSWORD"
echo "Vault Root Token: $VAULT_ROOT_TOKEN"
echo "Grafana Password: $GRAFANA_PASSWORD"
echo "━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━"
echo ""
echo "⚠️ IMPORTANT:"
echo "1. Save these credentials in a password manager"
echo "2. The .env.production file contains all secrets"
echo "3. Never commit .env.production to git"
echo "4. A backup was created at $ENV_FILE.backup"
echo ""
echo "🔒 To view all secrets: cat $ENV_FILE"
|
||||
166
scripts/generate-secrets.sh
Executable file
166
scripts/generate-secrets.sh
Executable file
@@ -0,0 +1,166 @@
|
||||
#!/bin/bash
# Generate secure secrets for AI Tax Agent deployment
# Writes a complete infra/compose/.env for local development, backing up
# any existing file first, and prints the generated admin credentials.

set -euo pipefail

# Colors for output
RED='\033[0;31m'
GREEN='\033[0;32m'
YELLOW='\033[1;33m'
BLUE='\033[0;34m'
NC='\033[0m' # No Color

# Function to generate random string
# Arguments: $1 - desired length (default 32)
# Outputs:   random alphanumeric string of that length
generate_secret() {
  local length=${1:-32}
  openssl rand -base64 $length | tr -d "=+/" | cut -c1-$length
}

# Function to generate UUID
generate_uuid() {
  python3 -c "import uuid; print(uuid.uuid4())"
}

echo -e "${BLUE}🔐 Generating secure secrets for AI Tax Agent...${NC}"
echo

# Generate secrets
AUTHENTIK_SECRET_KEY=$(generate_secret 50)
AUTHENTIK_OUTPOST_TOKEN=$(generate_secret 64)
AUTHENTIK_API_CLIENT_SECRET=$(generate_secret 32)
AUTHENTIK_GRAFANA_CLIENT_SECRET=$(generate_secret 32)
GRAFANA_OAUTH_CLIENT_SECRET=$(generate_secret 32)
NEXTAUTH_SECRET=$(generate_secret 32)
VAULT_DEV_ROOT_TOKEN_ID=$(generate_uuid)
POSTGRES_PASSWORD=$(generate_secret 16)
NEO4J_PASSWORD=$(generate_secret 16)
AUTHENTIK_DB_PASSWORD=$(generate_secret 16)
MINIO_ROOT_PASSWORD=$(generate_secret 16)
GRAFANA_PASSWORD=$(generate_secret 16)

# Create .env file with generated secrets
ENV_FILE="infra/compose/.env"
BACKUP_FILE="infra/compose/.env.backup.$(date +%Y%m%d_%H%M%S)"

# Backup existing .env if it exists
if [ -f "$ENV_FILE" ]; then
  echo -e "${YELLOW}📋 Backing up existing .env to $BACKUP_FILE${NC}"
  cp "$ENV_FILE" "$BACKUP_FILE"
fi

echo -e "${GREEN}🔑 Generating new .env file with secure secrets...${NC}"

# Unquoted EOF: $VARIABLES and $(commands) below are expanded into the file.
# NOTE(review): AUTHENTIK_BOOTSTRAP_PASSWORD=admin123 and
# AUTHENTIK_BOOTSTRAP_TOKEN=ak-bootstrap-token are fixed, well-known
# dev-only values — must be rotated for any non-local deployment.
cat > "$ENV_FILE" << EOF
# AI Tax Agent Environment Configuration
# Generated on $(date)
# IMPORTANT: Keep these secrets secure and never commit to version control

# Domain Configuration
DOMAIN=local
EMAIL=admin@local

# Database Passwords
POSTGRES_PASSWORD=$POSTGRES_PASSWORD
NEO4J_PASSWORD=$NEO4J_PASSWORD
AUTHENTIK_DB_PASSWORD=$AUTHENTIK_DB_PASSWORD

# Object Storage
MINIO_ROOT_USER=minio
MINIO_ROOT_PASSWORD=$MINIO_ROOT_PASSWORD

# Vector Database
QDRANT__SERVICE__GRPC_PORT=6334

# Secrets Management
VAULT_DEV_ROOT_TOKEN_ID=$VAULT_DEV_ROOT_TOKEN_ID

# Identity & SSO
AUTHENTIK_SECRET_KEY=$AUTHENTIK_SECRET_KEY
AUTHENTIK_OUTPOST_TOKEN=$AUTHENTIK_OUTPOST_TOKEN
AUTHENTIK_BOOTSTRAP_EMAIL=admin@local.lan
AUTHENTIK_BOOTSTRAP_PASSWORD=admin123
AUTHENTIK_BOOTSTRAP_TOKEN=ak-bootstrap-token
AUTHENTIK_API_CLIENT_SECRET=$AUTHENTIK_API_CLIENT_SECRET
AUTHENTIK_GRAFANA_CLIENT_SECRET=$AUTHENTIK_GRAFANA_CLIENT_SECRET

# OAuth Client Secrets
GRAFANA_OAUTH_CLIENT_ID=grafana
GRAFANA_OAUTH_CLIENT_SECRET=$GRAFANA_OAUTH_CLIENT_SECRET

# Monitoring
GRAFANA_PASSWORD=$GRAFANA_PASSWORD

# Feature Flags
UNLEASH_ADMIN_TOKEN=admin:development.unleash-insecure-admin-api-token

# Application Configuration
NEXTAUTH_SECRET=$NEXTAUTH_SECRET

# RAG & ML Models
RAG_EMBEDDING_MODEL=bge-small-en-v1.5
RAG_RERANKER_MODEL=cross-encoder/ms-marco-MiniLM-L-6-v2
RAG_ALPHA_BETA_GAMMA=0.5,0.3,0.2

# HMRC Integration
HMRC_MTD_ITSA_MODE=sandbox

# Rate Limits
RATE_LIMITS_HMRC_API_RPS=3
RATE_LIMITS_HMRC_API_BURST=6
RATE_LIMITS_LLM_API_RPS=10
RATE_LIMITS_LLM_API_BURST=20

# Confidence Thresholds
CONFIDENCE_AUTO_SUBMIT=0.95
CONFIDENCE_HUMAN_REVIEW=0.85
CONFIDENCE_REJECT=0.50

# Logging
LOG_LEVEL=INFO
LOG_FORMAT=json

# Development Settings
DEBUG=false
DEVELOPMENT_MODE=true

# Security
ENCRYPTION_KEY_ID=default
AUDIT_LOG_RETENTION_DAYS=90
PII_LOG_RETENTION_DAYS=30

# Backup & DR
BACKUP_ENABLED=true
BACKUP_SCHEDULE=0 2 * * *
BACKUP_RETENTION_DAYS=30

# Performance Tuning
MAX_WORKERS=4
BATCH_SIZE=100
CACHE_TTL_SECONDS=3600
CONNECTION_POOL_SIZE=20

# Feature Flags
FEATURE_RAG_ENABLED=true
FEATURE_FIRM_CONNECTORS_ENABLED=false
FEATURE_HMRC_SUBMISSION_ENABLED=false
FEATURE_ADVANCED_CALCULATIONS_ENABLED=true
EOF

# Set secure permissions (owner read/write only — the file holds secrets)
chmod 600 "$ENV_FILE"

echo -e "${GREEN}✅ Secrets generated successfully!${NC}"
echo
echo -e "${YELLOW}📝 Important credentials:${NC}"
echo -e " ${BLUE}Grafana Admin:${NC} admin / $GRAFANA_PASSWORD"
echo -e " ${BLUE}Authentik Admin:${NC} admin@local (set password on first login)"
echo -e " ${BLUE}Vault Root Token:${NC} $VAULT_DEV_ROOT_TOKEN_ID"
echo -e " ${BLUE}MinIO Admin:${NC} minio / $MINIO_ROOT_PASSWORD"
echo
echo -e "${RED}⚠️ SECURITY WARNING:${NC}"
echo -e " • Keep the .env file secure and never commit it to version control"
echo -e " • Change default passwords on first login"
echo -e " • Use proper secrets management in production"
echo -e " • Regularly rotate secrets"
echo
echo -e "${GREEN}🚀 Ready to deploy with: make deploy-infra${NC}"
|
||||
28
scripts/generate-tls-cert.sh
Executable file
28
scripts/generate-tls-cert.sh
Executable file
@@ -0,0 +1,28 @@
|
||||
#!/usr/bin/env bash
set -euo pipefail

# Generate self-signed TLS cert for local development
# Outputs: infra/compose/traefik/certs/local.crt and local.key

CERT_DIR="infra/compose/traefik/certs"
mkdir -p "$CERT_DIR"

CRT="$CERT_DIR/local.crt"
KEY="$CERT_DIR/local.key"

# Idempotent: keep an existing cert rather than regenerating (regeneration
# would invalidate any trust the user already established locally).
if [[ -f "$CRT" && -f "$KEY" ]]; then
  echo "✅ Dev TLS certificate already exists at $CERT_DIR"
  exit 0
fi

# Fail with a clear message instead of an opaque "command not found".
command -v openssl >/dev/null 2>&1 || {
  echo "❌ openssl is required but was not found on PATH" >&2
  exit 1
}

echo "🔐 Generating self-signed TLS certificate for local domains..."

# Subject Alternative Names for every local service hostname used by Traefik.
SAN="DNS:localhost,IP:127.0.0.1,DNS:*.local.lan,DNS:auth.local.lan,DNS:grafana.local.lan,DNS:review.local.lan,DNS:api.local.lan,DNS:vault.local.lan,DNS:minio.local.lan,DNS:minio-api.local.lan,DNS:qdrant.local.lan,DNS:neo4j.local.lan,DNS:prometheus.local.lan,DNS:loki.local.lan,DNS:unleash.local.lan,DNS:traefik.local.lan"

# Quiet stdout but let stderr through: the previous `2>&1` redirect hid all
# openssl diagnostics, making failures impossible to debug.
openssl req -x509 -nodes -newkey rsa:2048 -sha256 -days 3650 \
  -subj "/CN=local" \
  -keyout "$KEY" \
  -out "$CRT" \
  -addext "subjectAltName=$SAN" >/dev/null

echo "✅ Generated $CRT and $KEY"
|
||||
75
scripts/health-check.sh
Executable file
75
scripts/health-check.sh
Executable file
@@ -0,0 +1,75 @@
|
||||
#!/bin/bash

# Health Check Script
# Quick health check for all services

set -e

# ANSI colour codes for human-readable output
GREEN='\033[0;32m'
RED='\033[0;31m'
YELLOW='\033[1;33m'
BLUE='\033[0;34m'
NC='\033[0m' # No Color

# Target domain for all probed URLs; override with DOMAIN=... ./health-check.sh
DOMAIN="${DOMAIN:-harkon.co.uk}"

echo -e "${BLUE}AI Tax Agent - Health Check${NC}"
echo -e "${BLUE}============================${NC}"
echo ""
|
||||
# Function to check endpoint
|
||||
check_endpoint() {
|
||||
local name=$1
|
||||
local url=$2
|
||||
local expected_code=${3:-200}
|
||||
|
||||
echo -n "Checking $name... "
|
||||
|
||||
response=$(curl -s -o /dev/null -w "%{http_code}" "$url" 2>/dev/null || echo "000")
|
||||
|
||||
if [ "$response" = "$expected_code" ] || [ "$response" = "200" ] || [ "$response" = "302" ]; then
|
||||
echo -e "${GREEN}✓ OK ($response)${NC}"
|
||||
return 0
|
||||
else
|
||||
echo -e "${RED}✗ FAILED ($response)${NC}"
|
||||
return 1
|
||||
fi
|
||||
}
|
||||
|
||||
# Run every probe even when one fails: this script runs under `set -e`, so
# without `|| true` the first failing check_endpoint call would abort the
# run and hide the status of every remaining service.
echo -e "${YELLOW}Infrastructure Services:${NC}"
check_endpoint "Vault" "https://vault.${DOMAIN}/v1/sys/health" "200" || true
check_endpoint "MinIO Console" "https://minio-console.${DOMAIN}" "200" || true
check_endpoint "Neo4j" "https://neo4j.${DOMAIN}" "200" || true
check_endpoint "Qdrant" "https://qdrant.${DOMAIN}" "200" || true

echo ""
echo -e "${YELLOW}Application Services:${NC}"
check_endpoint "API Health" "https://api.${DOMAIN}/health" "200" || true
check_endpoint "Ingestion" "https://api.${DOMAIN}/ingestion/health" "200" || true
check_endpoint "Extract" "https://api.${DOMAIN}/extract/health" "200" || true
check_endpoint "Knowledge Graph" "https://api.${DOMAIN}/kg/health" "200" || true
check_endpoint "RAG Retriever" "https://api.${DOMAIN}/rag-retriever/health" "200" || true
check_endpoint "RAG Indexer" "https://api.${DOMAIN}/rag-indexer/health" "200" || true
check_endpoint "Forms" "https://api.${DOMAIN}/forms/health" "200" || true
check_endpoint "HMRC" "https://api.${DOMAIN}/hmrc/health" "200" || true
check_endpoint "OCR" "https://api.${DOMAIN}/ocr/health" "200" || true
check_endpoint "RPA" "https://api.${DOMAIN}/rpa/health" "200" || true
check_endpoint "Normalize Map" "https://api.${DOMAIN}/normalize-map/health" "200" || true
check_endpoint "Reason" "https://api.${DOMAIN}/reason/health" "200" || true
check_endpoint "Firm Connectors" "https://api.${DOMAIN}/firm-connectors/health" "200" || true
check_endpoint "Coverage" "https://api.${DOMAIN}/coverage/health" "200" || true

echo ""
echo -e "${YELLOW}UI:${NC}"
check_endpoint "Review UI" "https://app.${DOMAIN}" "200" || true

echo ""
echo -e "${YELLOW}Monitoring:${NC}"
check_endpoint "Prometheus" "https://prometheus.${DOMAIN}/-/healthy" "200" || true
check_endpoint "Grafana" "https://grafana.${DOMAIN}/api/health" "200" || true
check_endpoint "Loki" "https://loki.${DOMAIN}/ready" "200" || true

echo ""
echo -e "${BLUE}Health check complete!${NC}"
|
||||
|
||||
55
scripts/ingest/heuristics.yaml
Normal file
55
scripts/ingest/heuristics.yaml
Normal file
@@ -0,0 +1,55 @@
|
||||
schedules:
|
||||
SA100:
|
||||
- "(?i)dividend(s)?|bank interest|savings income|gift aid|student loan|HICBC|child benefit"
|
||||
SA103:
|
||||
- "(?i)self[- ]?employment|sole trader|trading income|accounts|turnover|cash basis|simplified expenses"
|
||||
SA105:
|
||||
- "(?i)landlord|uk property|rental|letting|section 24|mortgage interest|furnished holiday letting|FHL"
|
||||
SA108:
|
||||
- "(?i)capital gain(s)?|CGT|disposal|share matching|bed and breakfast|section 104 pool|allowable cost|AEA"
|
||||
|
||||
pages:
|
||||
# page_id: [regexes that trigger it]
|
||||
SA103S:
|
||||
- "(?i)self[- ]?employment.*short|turnover.*below.*VAT threshold"
|
||||
SA103F:
|
||||
- "(?i)self[- ]?employment.*full|complex accounts|over.*VAT threshold"
|
||||
SA105:
|
||||
- "(?i)UK property|property income|rental"
|
||||
SA108:
|
||||
- "(?i)capital gains|SA108|disposals?"
|
||||
|
||||
fields:
|
||||
# stable field_id → {page_id, form_id?, box_number?, synonyms[]}
|
||||
"SA100:UK_Dividends_Total":
|
||||
form_id: "SA100"
|
||||
page_id: null
|
||||
box_number: null
|
||||
synonyms:
|
||||
- "(?i)UK dividends( total)?"
|
||||
- "(?i)enter.*total.*dividends"
|
||||
"SA100:UK_Interest_Total":
|
||||
form_id: "SA100"
|
||||
synonyms:
|
||||
- "(?i)UK interest( total)?"
|
||||
- "(?i)bank.*interest"
|
||||
"SA105:FinanceCostReducer":
|
||||
page_id: "SA105"
|
||||
form_id: "SA100"
|
||||
synonyms:
|
||||
- "(?i)mortgage interest|finance cost(s)?|section 24 reducer"
|
||||
"SA105:Rental_Income_Total":
|
||||
page_id: "SA105"
|
||||
form_id: "SA100"
|
||||
synonyms:
|
||||
- "(?i)total rents received|rental income|letting income"
|
||||
"SA108:Disposals_Summary":
|
||||
page_id: "SA108"
|
||||
form_id: "SA100"
|
||||
synonyms:
|
||||
- "(?i)disposal(s)? summary|total proceeds|capital gains summary"
|
||||
"SA108:Losses_Brought_Forward":
|
||||
page_id: "SA108"
|
||||
form_id: "SA100"
|
||||
synonyms:
|
||||
- "(?i)loss(es)? brought forward"
|
||||
110
scripts/remote-build-base-ml.sh
Executable file
110
scripts/remote-build-base-ml.sh
Executable file
@@ -0,0 +1,110 @@
|
||||
#!/bin/bash

# Remote Build Script for base-ml Image
# This script builds the base-ml image on the remote production server
# to avoid pushing 1.2GB+ over the network from local machine

set -e

# Colors for output
RED='\033[0;31m'
GREEN='\033[0;32m'
YELLOW='\033[1;33m'
BLUE='\033[0;34m'
NC='\033[0m' # No Color

# Logging functions — each prints one coloured, emoji-prefixed line to stdout.

# Informational message (blue).
log_info() {
    echo -e "${BLUE}ℹ️  $1${NC}"
}

# Success message (green).
log_success() {
    echo -e "${GREEN}✅ $1${NC}"
}

# Warning message (yellow).
log_warning() {
    echo -e "${YELLOW}⚠️  $1${NC}"
}

# Error message (red). Does not exit — callers decide whether to abort.
log_error() {
    echo -e "${RED}❌ $1${NC}"
}
|
||||
|
||||
# Configuration — all five settings are overridable via positional arguments:
#   $1 ssh target, $2 remote checkout dir, $3 registry host,
#   $4 image version tag, $5 registry owner/namespace
REMOTE_HOST="${1:-deploy@141.136.35.199}"
REMOTE_DIR="${2:-/home/deploy/ai-tax-agent}"
REGISTRY="${3:-gitea.harkon.co.uk}"
VERSION="${4:-v1.0.1}"
OWNER="${5:-harkon}"

log_info "Remote Build Configuration"
echo "  Remote Host: $REMOTE_HOST"
echo "  Remote Directory: $REMOTE_DIR"
echo "  Registry: $REGISTRY"
echo "  Owner: $OWNER"
echo "  Version: $VERSION"
echo ""

# Step 1: Check if remote directory exists
log_info "Checking remote directory..."
# Missing directory is recoverable: create it and continue.
if ! ssh "$REMOTE_HOST" "[ -d $REMOTE_DIR ]"; then
    log_error "Remote directory $REMOTE_DIR does not exist!"
    log_info "Creating remote directory..."
    ssh "$REMOTE_HOST" "mkdir -p $REMOTE_DIR"
fi
log_success "Remote directory exists"
|
||||
|
||||
# Step 2: Sync code to remote server
log_info "Syncing code to remote server..."
# Mirror the working tree to the remote host, skipping VCS metadata,
# interpreter caches, virtualenvs and other local build artefacts.
sync_excludes=(
  '.git'
  '__pycache__'
  '*.pyc'
  '.venv'
  'venv'
  'node_modules'
  '.pytest_cache'
  '.mypy_cache'
  '.ruff_cache'
  '*.egg-info'
  '.DS_Store'
)
sync_args=(-avz)
for pattern in "${sync_excludes[@]}"; do
  sync_args+=("--exclude=$pattern")
done
rsync "${sync_args[@]}" ./ "$REMOTE_HOST:$REMOTE_DIR/"
log_success "Code synced to remote server"
|
||||
|
||||
# Step 3: Build base-ml on remote
log_info "Building base-ml image on remote server..."
log_warning "This will take 10-15 minutes (installing ML dependencies)..."

# The heredoc delimiter is intentionally UNQUOTED so REMOTE_DIR, REGISTRY,
# OWNER and VERSION expand locally before being sent to the remote shell.
# The previous version used a quoted heredoc with hardcoded
# gitea.harkon.co.uk/harkon/base-ml:v1.0.1 and /home/deploy/ai-tax-agent,
# silently ignoring the script's parameters.
ssh "$REMOTE_HOST" << ENDSSH
set -e
cd "$REMOTE_DIR"

# Build base-ml image
echo "Building base-ml image..."
docker build \
    -f infra/docker/base-ml.Dockerfile \
    -t "$REGISTRY/$OWNER/base-ml:$VERSION" \
    -t "$REGISTRY/$OWNER/base-ml:latest" \
    .

# Push to registry
echo "Pushing base-ml image to registry..."
docker push "$REGISTRY/$OWNER/base-ml:$VERSION"
docker push "$REGISTRY/$OWNER/base-ml:latest"

# Show image size
echo ""
echo "=== Base ML Image Built ==="
docker images | grep "base-ml"
echo ""
ENDSSH
|
||||
|
||||
log_success "base-ml image built and pushed from remote server!"

# Step 4: Verify image is available
# (Manual verification — we only print the registry package URL here.)
log_info "Verifying image is available in registry..."
log_info "You can check at: https://$REGISTRY/$OWNER/-/packages/container/base-ml"

log_success "Done! base-ml image is ready to use."
log_info "Next steps:"
echo "  1. Pull base-ml locally (optional): docker pull $REGISTRY/$OWNER/base-ml:$VERSION"
echo "  2. Build ML services: ./scripts/build-and-push-images.sh $REGISTRY $VERSION $OWNER"
|
||||
66
scripts/remote-debug-commands.txt
Normal file
66
scripts/remote-debug-commands.txt
Normal file
@@ -0,0 +1,66 @@
|
||||
# Remote Server Debug Commands
|
||||
# Copy and paste these commands on the remote server (ssh deploy@141.136.35.199)
|
||||
|
||||
# 1. Check Docker is running
|
||||
echo "=== Docker Version ==="
|
||||
docker --version
|
||||
docker info | head -20
|
||||
|
||||
# 2. Check Docker images
|
||||
echo -e "\n=== Docker Images ==="
|
||||
docker images | head -20
|
||||
|
||||
# 3. Check if logged in to Gitea
|
||||
echo -e "\n=== Docker Login Status ==="
|
||||
cat ~/.docker/config.json 2>/dev/null || echo "Not logged in to any registry"
|
||||
|
||||
# 4. Check Gitea container status
|
||||
echo -e "\n=== Gitea Container ==="
|
||||
docker ps -a | grep gitea
|
||||
|
||||
# 5. Check Gitea logs for errors
|
||||
echo -e "\n=== Gitea Recent Logs ==="
|
||||
docker logs --tail 50 gitea-server 2>&1 | grep -i error || echo "No errors in recent logs"
|
||||
|
||||
# 6. Test Gitea registry endpoint
|
||||
echo -e "\n=== Gitea Registry Endpoint Test ==="
|
||||
curl -I https://gitea.harkon.co.uk/v2/ 2>&1
|
||||
|
||||
# 7. Check disk space
|
||||
echo -e "\n=== Disk Space ==="
|
||||
df -h
|
||||
|
||||
# 8. Check if there's a build in progress
|
||||
echo -e "\n=== Docker Build Processes ==="
|
||||
ps aux | grep "docker build" | grep -v grep || echo "No docker build in progress"
|
||||
|
||||
# 9. Check Docker daemon logs
|
||||
echo -e "\n=== Docker Daemon Status ==="
|
||||
sudo systemctl status docker | head -20
|
||||
|
||||
# 10. Try to push a small test image
|
||||
echo -e "\n=== Test Docker Push ==="
|
||||
docker pull alpine:latest
|
||||
docker tag alpine:latest gitea.harkon.co.uk/harkon/test:latest
|
||||
docker push gitea.harkon.co.uk/harkon/test:latest
|
||||
|
||||
# 11. Check Gitea app.ini for upload limits
|
||||
echo -e "\n=== Gitea Upload Limits ==="
|
||||
docker exec gitea-server cat /data/gitea/conf/app.ini | grep -A 5 -i "max.*size" || echo "Could not read Gitea config"
|
||||
|
||||
# 12. Check if base-ml image exists
|
||||
echo -e "\n=== Base ML Image Status ==="
|
||||
docker images | grep base-ml
|
||||
|
||||
# 13. Check recent Docker push attempts
|
||||
echo -e "\n=== Recent Docker Events ==="
|
||||
docker events --since 1h --filter 'type=image' --filter 'event=push' || echo "No recent push events"
|
||||
|
||||
# 14. Check network connectivity to Gitea
|
||||
echo -e "\n=== Network Test to Gitea ==="
|
||||
ping -c 3 gitea.harkon.co.uk
|
||||
|
||||
# 15. Check if Traefik is running and configured
|
||||
echo -e "\n=== Traefik Status ==="
|
||||
docker ps | grep traefik
|
||||
|
||||
140
scripts/rollback-deployment.sh
Executable file
140
scripts/rollback-deployment.sh
Executable file
@@ -0,0 +1,140 @@
|
||||
#!/bin/bash

# Rollback Deployment Script
# Restores previous deployment from backup

set -e

# ANSI colour codes for terminal output
GREEN='\033[0;32m'
RED='\033[0;31m'
YELLOW='\033[1;33m'
BLUE='\033[0;34m'
NC='\033[0m' # No Color

# Remote target; override REMOTE_USER / REMOTE_HOST via the environment.
REMOTE_USER="${REMOTE_USER:-deploy}"
REMOTE_HOST="${REMOTE_HOST:-141.136.35.199}"
# Deployment root on the remote host (backups live under $REMOTE_DIR/backups).
REMOTE_DIR="/opt/ai-tax-agent"

echo -e "${RED}========================================${NC}"
echo -e "${RED}AI Tax Agent - Deployment Rollback${NC}"
echo -e "${RED}========================================${NC}"
echo ""
|
||||
|
||||
# Confirm rollback
echo -e "${YELLOW}WARNING: This will rollback to the previous deployment!${NC}"
echo -e "${YELLOW}All current changes will be lost.${NC}"
echo ""
# -r: keep backslashes in the typed answer literal (SC2162).
read -r -p "Are you sure you want to rollback? (yes/no): " confirm

if [ "$confirm" != "yes" ]; then
    echo -e "${BLUE}Rollback cancelled.${NC}"
    exit 0
fi

echo ""
echo -e "${BLUE}Step 1: Listing available backups${NC}"
echo "-----------------------------------"

ssh "${REMOTE_USER}@${REMOTE_HOST}" "ls -lht ${REMOTE_DIR}/backups/ | head -10"

echo ""
read -r -p "Enter backup timestamp to restore (e.g., 20250104_120000): " backup_timestamp

if [ -z "$backup_timestamp" ]; then
    echo -e "${RED}No backup timestamp provided. Exiting.${NC}"
    exit 1
fi

# The timestamp is interpolated into remote shell commands below, so
# restrict it to a safe character set — prevents shell injection via a
# crafted answer like "x; rm -rf /".
if ! [[ "$backup_timestamp" =~ ^[A-Za-z0-9._-]+$ ]]; then
    echo -e "${RED}Invalid backup timestamp: ${backup_timestamp}${NC}"
    exit 1
fi

BACKUP_DIR="${REMOTE_DIR}/backups/${backup_timestamp}"
|
||||
|
||||
echo ""
echo -e "${BLUE}Step 2: Verifying backup exists${NC}"
echo "--------------------------------"

# Abort early if the requested backup directory is absent on the remote host.
if ! ssh ${REMOTE_USER}@${REMOTE_HOST} "[ -d ${BACKUP_DIR} ]"; then
    echo -e "${RED}Backup directory not found: ${BACKUP_DIR}${NC}"
    exit 1
fi

echo -e "${GREEN}✓ Backup found${NC}"
|
||||
|
||||
echo ""
echo -e "${BLUE}Step 3: Stopping current services${NC}"
echo "----------------------------------"

# Quoted heredoc ('EOF'): nothing expands locally — the literal commands
# run on the remote host. Services are stopped before infra and monitoring.
ssh ${REMOTE_USER}@${REMOTE_HOST} << 'EOF'
cd /opt/ai-tax-agent
docker compose -f compose/production/services.yaml down
docker compose -f compose/production/infrastructure.yaml down
docker compose -f compose/production/monitoring.yaml down
EOF

echo -e "${GREEN}✓ Services stopped${NC}"

echo ""
echo -e "${BLUE}Step 4: Restoring configuration files${NC}"
echo "--------------------------------------"

# UNQUOTED heredoc: ${BACKUP_DIR} and ${REMOTE_DIR} expand locally before
# the commands are sent to the remote shell.
# NOTE(review): the Traefik restore target is /opt/compose/... rather than
# ${REMOTE_DIR}/compose/... — looks inconsistent with the other restore
# destinations; confirm against the actual server layout.
ssh ${REMOTE_USER}@${REMOTE_HOST} << EOF
# Restore compose files
cp ${BACKUP_DIR}/infrastructure.yaml ${REMOTE_DIR}/compose/production/
cp ${BACKUP_DIR}/services.yaml ${REMOTE_DIR}/compose/production/
cp ${BACKUP_DIR}/monitoring.yaml ${REMOTE_DIR}/compose/production/

# Restore environment file
cp ${BACKUP_DIR}/.env.production ${REMOTE_DIR}/compose/

# Restore Traefik config if exists
if [ -f ${BACKUP_DIR}/traefik-dynamic.yml ]; then
    cp ${BACKUP_DIR}/traefik-dynamic.yml /opt/compose/traefik/config/
fi
EOF

echo -e "${GREEN}✓ Configuration restored${NC}"

echo ""
echo -e "${BLUE}Step 5: Restarting services${NC}"
echo "---------------------------"

# Bring the stack back in dependency order: infra → app services →
# monitoring, with fixed sleeps between tiers to let each settle.
ssh ${REMOTE_USER}@${REMOTE_HOST} << 'EOF'
cd /opt/ai-tax-agent

# Start infrastructure first
docker compose -f compose/production/infrastructure.yaml up -d

# Wait for infrastructure to be ready
echo "Waiting 30 seconds for infrastructure to initialize..."
sleep 30

# Start application services
docker compose -f compose/production/services.yaml up -d

# Wait for services to start
echo "Waiting 20 seconds for services to start..."
sleep 20

# Start monitoring
docker compose -f compose/production/monitoring.yaml up -d
EOF
|
||||
|
||||
echo -e "${GREEN}✓ Services restarted${NC}"

echo ""
echo -e "${BLUE}Step 6: Verifying deployment${NC}"
echo "----------------------------"

# Check running containers
# Shows core containers' status; a failed grep here aborts the script
# (set -e), which is intentional: no matching containers means the
# rollback did not bring the stack up.
ssh ${REMOTE_USER}@${REMOTE_HOST} "docker ps --format 'table {{.Names}}\t{{.Status}}' | grep -E '(vault|minio|postgres|svc-)'"

echo ""
echo -e "${GREEN}========================================${NC}"
echo -e "${GREEN}Rollback Complete${NC}"
echo -e "${GREEN}========================================${NC}"
echo ""
echo -e "${YELLOW}Next Steps:${NC}"
echo "1. Verify services are running: ./scripts/verify-deployment.sh"
echo "2. Check application: https://app.harkon.co.uk"
echo "3. Review logs if needed: ssh ${REMOTE_USER}@${REMOTE_HOST} 'docker logs <container>'"
echo ""
||||
|
||||
249
scripts/setup-authentik.sh
Executable file
249
scripts/setup-authentik.sh
Executable file
@@ -0,0 +1,249 @@
|
||||
#!/bin/bash
# Setup Authentik SSO for AI Tax Agent using Blueprint Import

set -euo pipefail

# Colors for output
RED='\033[0;31m'
GREEN='\033[0;32m'
YELLOW='\033[1;33m'
BLUE='\033[0;34m'
NC='\033[0m' # No Color

# Configuration
# DOMAIN defaults to "local" → https://auth.local; override via environment.
DOMAIN=${DOMAIN:-local}
AUTHENTIK_URL="https://auth.${DOMAIN}"
AUTHENTIK_API_URL="$AUTHENTIK_URL/api/v3"
ADMIN_EMAIL="admin@local"
# Dev-only default password; set AUTHENTIK_ADMIN_PASSWORD for real use.
ADMIN_PASSWORD="${AUTHENTIK_ADMIN_PASSWORD:-admin123}"
# Blueprint definition applied to Authentik further below.
BOOTSTRAP_FILE="infra/compose/authentik/bootstrap.yaml"

echo -e "${BLUE}🔧 Setting up Authentik SSO for AI Tax Agent using Blueprint Import...${NC}"
echo
|
||||
|
||||
# Function to wait for Authentik to be ready
# Polls up to 60 times (5s apart, ~5 min). Uses curl --resolve to pin the
# auth hostname to 127.0.0.1 so the probe works before DNS is configured,
# and -k to accept the self-signed dev certificate.
# Readiness heuristic: Authentik is "ready" when any of the probed URLs
# (initial-setup flow, login flow, root) answers 200/302/401.
# Returns 0 when ready, 1 on timeout.
wait_for_authentik() {
    echo -e "${YELLOW}⏳ Waiting for Authentik to be ready...${NC}"
    local max_attempts=60
    local attempt=1
    local host
    # Strip scheme and path from AUTHENTIK_URL to get the bare hostname.
    host=$(echo "$AUTHENTIK_URL" | sed -E 's#^https?://([^/]+).*$#\1#')
    local resolve=(--resolve "${host}:443:127.0.0.1")

    while [ $attempt -le $max_attempts ]; do
        # `|| true` keeps set -e from aborting while the service is down.
        code_setup=$(curl -ks "${resolve[@]}" -o /dev/null -w '%{http_code}' "$AUTHENTIK_URL/if/flow/initial-setup/" || true)
        code_login=$(curl -ks "${resolve[@]}" -o /dev/null -w '%{http_code}' "$AUTHENTIK_URL/if/flow/default-authentication-flow/" || true)
        code_root=$(curl -ks "${resolve[@]}" -o /dev/null -w '%{http_code}' "$AUTHENTIK_URL/" || true)
        # 404 on the setup flow means initial setup was already completed;
        # then the login flow or root answering is enough.
        if [[ "$code_setup" == "404" ]]; then
            if [[ "$code_login" =~ ^(200|302|401)$ || "$code_root" =~ ^(200|302|401)$ ]]; then
                echo -e "${GREEN}✅ Authentik is ready!${NC}"; return 0
            fi
        fi
        if [[ "$code_setup" =~ ^(200|302|401)$ || "$code_login" =~ ^(200|302|401)$ || "$code_root" =~ ^(200|302|401)$ ]]; then
            echo -e "${GREEN}✅ Authentik is ready!${NC}"; return 0
        fi
        echo -n "."
        sleep 5
        ((attempt++))
    done

    echo -e "${RED}❌ Authentik failed to start within expected time${NC}"
    return 1
}
|
||||
|
||||
# Generate the Authentik client secrets and secret key unless they are
# already provided via the environment. Values are exported for use by the
# rest of the script and by docker compose.
# Fix: assignment is separated from `export` (SC2155) — `export VAR=$(cmd)`
# always returns export's status, masking an openssl failure from set -e.
generate_secrets() {
    echo -e "${YELLOW}🔑 Generating secure secrets...${NC}"

    # Generate client secrets if not already set
    if [ -z "${AUTHENTIK_API_CLIENT_SECRET:-}" ]; then
        AUTHENTIK_API_CLIENT_SECRET=$(openssl rand -base64 32 | tr -d '=+/')
        export AUTHENTIK_API_CLIENT_SECRET
        echo "Generated API client secret"
    fi

    if [ -z "${AUTHENTIK_GRAFANA_CLIENT_SECRET:-}" ]; then
        AUTHENTIK_GRAFANA_CLIENT_SECRET=$(openssl rand -base64 32 | tr -d '=+/')
        export AUTHENTIK_GRAFANA_CLIENT_SECRET
        echo "Generated Grafana client secret"
    fi

    if [ -z "${AUTHENTIK_SECRET_KEY:-}" ]; then
        AUTHENTIK_SECRET_KEY=$(openssl rand -base64 50 | tr -d '=+/')
        export AUTHENTIK_SECRET_KEY
        echo "Generated Authentik secret key"
    fi

    echo -e "${GREEN}✅ Secrets generated${NC}"
}
|
||||
|
||||
# Obtain an Authentik API token and print it on stdout.
# Resolution order: AUTHENTIK_BOOTSTRAP_TOKEN env var, else create a token
# via the API using admin basic auth; prints "" when neither works.
# Fix: the progress line now goes to stderr — it previously went to stdout
# and was captured by callers doing `token=$(get_api_token)`, corrupting
# the returned token with the coloured "Getting API token..." text.
get_api_token() {
    echo -e "${YELLOW}🔑 Getting API token...${NC}" >&2

    # Use bootstrap token if available
    if [ -n "${AUTHENTIK_BOOTSTRAP_TOKEN:-}" ]; then
        echo "$AUTHENTIK_BOOTSTRAP_TOKEN"
        return 0
    fi

    # Try to get token via API (requires manual setup first)
    local token_response
    token_response=$(curl -s -X POST "$AUTHENTIK_API_URL/core/tokens/" \
        -H "Content-Type: application/json" \
        -u "$ADMIN_EMAIL:$ADMIN_PASSWORD" \
        -d '{
            "identifier": "ai-tax-agent-setup",
            "description": "Setup token for AI Tax Agent",
            "expires": "2025-12-31T23:59:59Z"
        }' 2>/dev/null || echo "")

    # Extract the "key" field; any parse failure degrades to "".
    if [ -n "$token_response" ]; then
        echo "$token_response" | python3 -c "import sys, json; print(json.load(sys.stdin)['key'])" 2>/dev/null || echo ""
    else
        echo ""
    fi
}
|
||||
|
||||
# Function to import blueprint
# Registers the bootstrap blueprint with Authentik and triggers its
# application.
# Arguments: $1 - API bearer token
# Returns 0 on (apparent) success, 1 when the bootstrap file is missing or
# the blueprint could not be created.
# NOTE(review): the API call registers the server-side path
# /blueprints/bootstrap.yaml — presumably $BOOTSTRAP_FILE is mounted into
# the container at that path; confirm against the compose config.
import_blueprint() {
    local token="$1"

    echo -e "${YELLOW}📋 Importing Authentik blueprint...${NC}"

    if [ ! -f "$BOOTSTRAP_FILE" ]; then
        echo -e "${RED}❌ Bootstrap file not found: $BOOTSTRAP_FILE${NC}"
        return 1
    fi

    # Create blueprint instance
    local blueprint_response
    blueprint_response=$(curl -s -X POST "$AUTHENTIK_API_URL/managed/blueprints/" \
        -H "Content-Type: application/json" \
        -H "Authorization: Bearer $token" \
        -d '{
            "name": "AI Tax Agent Bootstrap",
            "path": "/blueprints/bootstrap.yaml",
            "context": {},
            "enabled": true
        }' 2>/dev/null || echo "")

    # Pull the primary key out of the creation response; "" on any failure.
    local blueprint_pk
    blueprint_pk=$(echo "$blueprint_response" | python3 -c "import sys, json; print(json.load(sys.stdin).get('pk', ''))" 2>/dev/null || echo "")

    if [ -n "$blueprint_pk" ]; then
        echo -e "${GREEN}✅ Blueprint created with ID: $blueprint_pk${NC}"

        # Apply the blueprint
        echo -e "${YELLOW}🔄 Applying blueprint...${NC}"
        local apply_response
        apply_response=$(curl -s -X POST "$AUTHENTIK_API_URL/managed/blueprints/$blueprint_pk/apply/" \
            -H "Content-Type: application/json" \
            -H "Authorization: Bearer $token" \
            -d '{}' 2>/dev/null || echo "")

        # Best-effort success detection — the apply endpoint's response body
        # is only grepped, so a failure here is a warning, not an error.
        if echo "$apply_response" | grep -q "success\|applied" 2>/dev/null; then
            echo -e "${GREEN}✅ Blueprint applied successfully${NC}"
        else
            echo -e "${YELLOW}⚠️  Blueprint application may have had issues. Check Authentik logs.${NC}"
        fi
    else
        echo -e "${RED}❌ Failed to create blueprint${NC}"
        return 1
    fi
}
|
||||
|
||||
# Function to check blueprint status
# Lists all registered blueprints with their enabled/disabled state.
# Arguments: $1 - API bearer token
# Purely informational: every failure path degrades to a message rather
# than a non-zero exit.
check_blueprint_status() {
    local token="$1"

    echo -e "${YELLOW}🔍 Checking blueprint status...${NC}"

    local blueprints_response
    blueprints_response=$(curl -s -X GET "$AUTHENTIK_API_URL/managed/blueprints/" \
        -H "Authorization: Bearer $token" 2>/dev/null || echo "")

    if [ -n "$blueprints_response" ]; then
        # Format the paginated JSON response as one line per blueprint.
        echo "$blueprints_response" | python3 -c "
import sys, json
try:
    data = json.load(sys.stdin)
    for bp in data.get('results', []):
        print(f\"Blueprint: {bp['name']} - Status: {'Enabled' if bp['enabled'] else 'Disabled'}\")
except:
    print('Could not parse blueprint status')
" 2>/dev/null || echo "Could not check blueprint status"
    fi
}
|
||||
|
||||
|
||||
|
||||
# Main setup function
# Flow: generate secrets → wait for Authentik → if the initial-setup flow is
# still live, print manual instructions and stop → otherwise obtain an API
# token and import/apply the bootstrap blueprint.
main() {
    # Generate secrets first
    generate_secrets

    # Check if Authentik is running
    if ! wait_for_authentik; then
        echo -e "${RED}❌ Authentik is not accessible. Please ensure it's running.${NC}"
        exit 1
    fi

    # Check if initial setup is needed
    # Same --resolve pinning as wait_for_authentik: probe via 127.0.0.1.
    local host
    host=$(echo "$AUTHENTIK_URL" | sed -E 's#^https?://([^/]+).*$#\1#')
    local resolve=(--resolve "${host}:443:127.0.0.1")
    local setup_code
    setup_code=$(curl -ks "${resolve[@]}" -o /dev/null -w '%{http_code}' "$AUTHENTIK_URL/if/flow/initial-setup/" || true)

    # 200 on the initial-setup flow → admin account not created yet; this
    # requires a human, so print instructions and exit successfully.
    # NOTE(review): the URLs below hardcode https://auth.local etc. and will
    # not match $AUTHENTIK_URL when DOMAIN is overridden — confirm intent.
    if [[ "$setup_code" == "200" ]]; then
        echo -e "${YELLOW}📋 Initial Authentik setup required:${NC}"
        echo -e "  1. Open ${BLUE}https://auth.local/if/flow/initial-setup/${NC}"
        echo -e "  2. Complete the setup wizard with admin user"
        echo -e "  3. Re-run this script after setup is complete"
        echo
        echo -e "${BLUE}💡 Tip: Use these credentials:${NC}"
        echo -e "  • Email: ${BLUE}$ADMIN_EMAIL${NC}"
        echo -e "  • Password: ${BLUE}$ADMIN_PASSWORD${NC}"
        return 0
    fi

    # Try to get API token
    local api_token
    api_token=$(get_api_token)

    if [ -n "$api_token" ]; then
        echo -e "${GREEN}🔑 API token obtained, proceeding with blueprint import...${NC}"

        # Import the blueprint configuration
        if import_blueprint "$api_token"; then
            echo -e "${GREEN}🎉 Authentik configuration imported successfully!${NC}"

            # Check status
            check_blueprint_status "$api_token"

            # Display client secrets for configuration
            echo
            echo -e "${BLUE}🔑 Client Secrets (save these for service configuration):${NC}"
            echo -e "  • API Client Secret: ${YELLOW}${AUTHENTIK_API_CLIENT_SECRET}${NC}"
            echo -e "  • Grafana Client Secret: ${YELLOW}${AUTHENTIK_GRAFANA_CLIENT_SECRET}${NC}"

        else
            echo -e "${RED}❌ Blueprint import failed${NC}"
            exit 1
        fi
    else
        echo -e "${YELLOW}📋 Could not obtain API token. Manual configuration required:${NC}"
        echo -e "  1. Open ${BLUE}https://auth.local${NC} and log in as admin"
        echo -e "  2. Go to Admin Interface > Tokens"
        echo -e "  3. Create a new token and set AUTHENTIK_BOOTSTRAP_TOKEN in .env"
        echo -e "  4. Re-run this script"
    fi

    echo
    echo -e "${BLUE}🔗 Access URLs:${NC}"
    echo -e "  • Authentik Admin: ${BLUE}https://auth.local${NC}"
    echo -e "  • API Gateway: ${BLUE}https://api.local${NC}"
    echo -e "  • Grafana: ${BLUE}https://grafana.local${NC}"
    echo -e "  • Review Portal: ${BLUE}https://review.local${NC}"
}

# Run main function
main "$@"
|
||||
58
scripts/update-dockerfiles.sh
Normal file
58
scripts/update-dockerfiles.sh
Normal file
@@ -0,0 +1,58 @@
|
||||
#!/bin/bash
# Update all Dockerfiles to use optimized requirements
# Rewrites each service's Dockerfile in place (with a .bak backup) so it
# copies libs/requirements-base.txt and upgrades pip before installing.

set -e

echo "🔧 Updating Dockerfiles to use optimized requirements..."

# List of all services
SERVICES=(
    "svc_extract"
    "svc_kg"
    "svc_rag_retriever"
    "svc_rag_indexer"
    "svc_forms"
    "svc_hmrc"
    "svc_ocr"
    "svc_rpa"
    "svc_normalize_map"
    "svc_reason"
    "svc_firm_connectors"
    "svc_coverage"
)

for service in "${SERVICES[@]}"; do
    dockerfile="apps/$service/Dockerfile"

    # Skip (don't fail) services without a Dockerfile.
    if [ ! -f "$dockerfile" ]; then
        echo "⚠️  Dockerfile not found: $dockerfile"
        continue
    fi

    echo "📝 Updating $service..."

    # Create backup
    cp "$dockerfile" "$dockerfile.bak"

    # Update the requirements copy and install lines
    # -i.tmp keeps sed portable across GNU/BSD; the .tmp copy is removed below.
    # NOTE(review): the \n in the third replacement is GNU-sed behaviour —
    # BSD sed emits a literal 'n'; confirm which sed runs this script.
    sed -i.tmp \
        -e 's|COPY libs/requirements\.txt /tmp/libs-requirements\.txt|COPY libs/requirements-base.txt /tmp/libs-requirements.txt|g' \
        -e 's|COPY apps/.*/requirements\.txt /tmp/requirements\.txt|COPY apps/'"$service"'/requirements.txt /tmp/requirements.txt|g' \
        -e 's|RUN pip install --no-cache-dir -r /tmp/requirements\.txt -r /tmp/libs-requirements\.txt|RUN pip install --no-cache-dir --upgrade pip \&\& \\\n    pip install --no-cache-dir -r /tmp/libs-requirements.txt -r /tmp/requirements.txt|g' \
        "$dockerfile"

    # Remove temp file
    rm -f "$dockerfile.tmp"

    echo "✅ Updated $service"
done

echo ""
echo "✅ All Dockerfiles updated!"
echo ""
echo "📋 Next steps:"
echo "1. Review changes: git diff apps/*/Dockerfile"
echo "2. Clean Docker cache: docker system prune -a"
echo "3. Rebuild images: ./scripts/build-and-push-images.sh gitea.harkon.co.uk v1.0.1 blue"
echo "4. Verify sizes: docker images | grep gitea.harkon.co.uk"
||||
154
scripts/verify-deployment.sh
Executable file
154
scripts/verify-deployment.sh
Executable file
@@ -0,0 +1,154 @@
|
||||
#!/bin/bash

# Deployment Verification Script
# Checks all services are running and healthy
#
# Environment overrides:
#   REMOTE_USER  SSH user on the deployment host (default: deploy)
#   REMOTE_HOST  Deployment host address         (default: 141.136.35.199)
#   DOMAIN       Public base domain              (default: harkon.co.uk)

# Strict mode: -u catches typo'd variables, pipefail surfaces failures in
# the ssh|grep pipelines below (every intentional best-effort call in this
# script is already guarded with "|| true").
set -euo pipefail

# ANSI color codes for terminal output
GREEN='\033[0;32m'
RED='\033[0;31m'
YELLOW='\033[1;33m'
BLUE='\033[0;34m'
NC='\033[0m' # No Color

REMOTE_USER="${REMOTE_USER:-deploy}"
REMOTE_HOST="${REMOTE_HOST:-141.136.35.199}"
DOMAIN="${DOMAIN:-harkon.co.uk}"

echo -e "${BLUE}========================================${NC}"
echo -e "${BLUE}AI Tax Agent - Deployment Verification${NC}"
echo -e "${BLUE}========================================${NC}"
echo ""
|
||||
|
||||
# Function to check service health.
# check_service NAME URL — probe URL with curl and print a colored
# pass/fail line. Returns 0 when the endpoint answers successfully,
# 1 otherwise.
check_service() {
    local service_name=$1
    local url=$2

    echo -n "Checking $service_name... "

    # -f: treat HTTP >= 400 as failure; --max-time bounds the wait so a
    # single dead endpoint cannot stall the entire verification run.
    if curl -s -f --max-time 10 -o /dev/null "$url"; then
        echo -e "${GREEN}✓ OK${NC}"
        return 0
    else
        echo -e "${RED}✗ FAILED${NC}"
        return 1
    fi
}
|
||||
|
||||
# Function to check Docker container status on the remote host.
# check_container NAME — queries the deployment host over SSH for a
# container whose name matches NAME. Returns 0 when its status reports
# "Up", 1 otherwise (including when SSH itself fails).
check_container() {
    local container_name=$1
    local status

    echo -n "Checking container $container_name... "

    # Quoted ssh target avoids word-splitting (SC2086); "|| status=\"\""
    # keeps an SSH failure from aborting the script under set -e and
    # makes it report as "not running" instead.
    status=$(ssh "${REMOTE_USER}@${REMOTE_HOST}" "docker ps --filter name=$container_name --format '{{.Status}}'" 2>/dev/null) || status=""

    if [[ $status == *"Up"* ]]; then
        echo -e "${GREEN}✓ Running${NC}"
        return 0
    else
        echo -e "${RED}✗ Not running${NC}"
        return 1
    fi
}
|
||||
|
||||
echo -e "${YELLOW}1. Checking Infrastructure Services${NC}"
echo "-----------------------------------"

# Show infra containers on the remote host. Best effort: grep matching
# nothing must not abort the run, hence "|| true". The ssh target is
# quoted to avoid word-splitting (SC2086).
ssh "${REMOTE_USER}@${REMOTE_HOST}" "docker ps --format 'table {{.Names}}\t{{.Status}}\t{{.Ports}}'" | grep -E "(vault|minio|postgres|redis|neo4j|qdrant|nats)" || true

echo ""
echo -e "${YELLOW}2. Checking Infrastructure Endpoints${NC}"
echo "------------------------------------"

check_service "Vault" "https://vault.${DOMAIN}/v1/sys/health" || true
check_service "MinIO Console" "https://minio-console.${DOMAIN}" || true
check_service "Neo4j Browser" "https://neo4j.${DOMAIN}" || true
check_service "Qdrant" "https://qdrant.${DOMAIN}" || true

echo ""
echo -e "${YELLOW}3. Checking Application Services${NC}"
echo "--------------------------------"

# Application containers follow the svc-<name> naming convention.
ssh "${REMOTE_USER}@${REMOTE_HOST}" "docker ps --format 'table {{.Names}}\t{{.Status}}'" | grep -E "svc-" || true

echo ""
echo -e "${YELLOW}4. Checking Application Endpoints${NC}"
echo "---------------------------------"

check_service "API Gateway" "https://api.${DOMAIN}/health" || true
check_service "UI" "https://app.${DOMAIN}" || true

# Individual services exposed behind the API gateway.
services=(
    "ingestion"
    "extract"
    "kg"
    "rag-retriever"
    "rag-indexer"
    "forms"
    "hmrc"
    "ocr"
    "rpa"
    "normalize-map"
    "reason"
    "firm-connectors"
    "coverage"
)

for service in "${services[@]}"; do
    check_service "svc-$service" "https://api.${DOMAIN}/$service/health" || true
done

echo ""
echo -e "${YELLOW}5. Checking Monitoring Stack${NC}"
echo "----------------------------"

check_service "Prometheus" "https://prometheus.${DOMAIN}/-/healthy" || true
check_service "Grafana" "https://grafana.${DOMAIN}/api/health" || true
check_service "Loki" "https://loki.${DOMAIN}/ready" || true

echo ""
echo -e "${YELLOW}6. Checking Docker Networks${NC}"
echo "--------------------------"

ssh "${REMOTE_USER}@${REMOTE_HOST}" "docker network ls | grep -E '(frontend|backend)'" || true

echo ""
echo -e "${YELLOW}7. Checking Disk Usage${NC}"
echo "---------------------"

ssh "${REMOTE_USER}@${REMOTE_HOST}" "df -h | grep -E '(Filesystem|/opt|/var/lib/docker)'" || true

echo ""
echo -e "${YELLOW}8. Checking Memory Usage${NC}"
echo "-----------------------"

ssh "${REMOTE_USER}@${REMOTE_HOST}" "free -h" || true

echo ""
echo -e "${YELLOW}9. Recent Container Logs (Last 10 lines)${NC}"
echo "---------------------------------------"

# Tail logs from a few key services to surface obvious startup errors.
for container in vault minio postgres svc-ingestion svc-extract; do
    echo -e "\n${BLUE}=== $container ===${NC}"
    ssh "${REMOTE_USER}@${REMOTE_HOST}" "docker logs $container --tail 10 2>&1" || echo "Container not found"
done

echo ""
echo -e "${BLUE}========================================${NC}"
echo -e "${BLUE}Verification Complete${NC}"
echo -e "${BLUE}========================================${NC}"
echo ""
echo -e "${YELLOW}Next Steps:${NC}"
echo "1. Check any failed services above"
echo "2. Review logs for errors: ssh ${REMOTE_USER}@${REMOTE_HOST} 'docker logs <container-name>'"
echo "3. Access Grafana: https://grafana.${DOMAIN}"
echo "4. Access Application: https://app.${DOMAIN}"
echo ""
|
||||
|
||||
44
scripts/verify-infra.sh
Executable file
44
scripts/verify-infra.sh
Executable file
@@ -0,0 +1,44 @@
|
||||
#!/usr/bin/env bash
# Quick reachability check for core infrastructure endpoints.
set -euo pipefail

# Repository root, resolved relative to this script's own location so the
# script works regardless of the caller's working directory.
ROOT_DIR=$(cd -- "$(dirname -- "${BASH_SOURCE[0]}")/.." && pwd)
# Directory holding the docker compose files and the .env they read.
COMPOSE_DIR="$ROOT_DIR/infra/compose"
|
||||
|
||||
# get_env KEY [DEFAULT]
# Print the value of KEY from "$COMPOSE_DIR/.env", or DEFAULT (empty if
# omitted) when the file or the key is absent. When the key appears more
# than once, the last assignment wins. Values are everything after the
# first '=', so "KEY=a=b" yields "a=b".
get_env() {
  local key="$1"; local def="${2-}"
  local env_file="$COMPOSE_DIR/.env"
  local line

  # Missing .env is not an error: fall back to the default quietly
  # instead of letting grep print "No such file" to stderr.
  if [[ ! -f "$env_file" ]]; then
    printf "%s" "$def"
    return
  fi

  line=$(grep -E "^${key}=" "$env_file" | tail -n1 || true)
  if [[ -z "$line" ]]; then printf "%s" "$def"; return; fi
  printf "%s" "${line#*=}"
}
|
||||
|
||||
# Resolve the target domain: an explicit (non-empty) $DOMAIN environment
# variable wins; otherwise read it from the compose .env, falling back to
# "local".
if [[ -z "${DOMAIN:-}" ]]; then
  DOMAIN=$(get_env DOMAIN local)
fi

printf '🔎 Verifying core infra endpoints for domain: %s...\n' "$DOMAIN"
|
||||
|
||||
# check NAME URL — probe URL and report reachability.
# 200 (OK), 302 (redirect, e.g. to a login page) and 401 (auth required)
# all count as "reachable", since several services sit behind auth.
# Returns non-zero when the endpoint is unreachable.
check() {
  local name="$1" url="$2"
  local code
  # -k: self-signed certs are expected locally; --max-time keeps one dead
  # endpoint from hanging the whole run; "|| true" turns curl failures
  # into an http_code of 000 rather than aborting under set -e.
  code=$(curl -ks --max-time 10 -o /dev/null -w '%{http_code}' "$url" || true)
  if [[ "$code" == "200" || "$code" == "302" || "$code" == "401" ]]; then
    echo "✅ $name ($url) -> $code"
  else
    echo "❌ $name ($url) -> $code"; return 1
  fi
}
|
||||
|
||||
# Probe every core endpoint, remembering whether anything failed so an
# overall status can be reported at the end.
ok=true

# "name|url" pairs, probed in order.
targets=(
  "Traefik|http://localhost:8080/ping"
  "Authentik|https://auth.${DOMAIN}/if/flow/default-authentication-flow/"
  "Grafana|https://grafana.${DOMAIN}"
  "Unleash|https://unleash.${DOMAIN}"
  "Neo4j|https://neo4j.${DOMAIN}"
  "Qdrant|https://qdrant.${DOMAIN}/health"
  "Vault|https://vault.${DOMAIN}/v1/sys/health"
  "Minio|https://minio.${DOMAIN}"
)

for target in "${targets[@]}"; do
  check "${target%%|*}" "${target#*|}" || ok=false
done

if [[ "$ok" != true ]]; then
  echo "⚠️ Some checks failed. Use 'make logs' or 'make logs-service SERVICE=name'"
  exit 1
fi
echo "🎉 Infra endpoints reachable"
|
||||
Reference in New Issue
Block a user