clean up base infra
Some checks failed
CI/CD Pipeline / Code Quality & Linting (push) Has been cancelled
CI/CD Pipeline / Policy Validation (push) Has been cancelled
CI/CD Pipeline / Test Suite (push) Has been cancelled
CI/CD Pipeline / Build Docker Images (svc-coverage) (push) Has been cancelled
CI/CD Pipeline / Build Docker Images (svc-extract) (push) Has been cancelled
CI/CD Pipeline / Build Docker Images (svc-firm-connectors) (push) Has been cancelled
CI/CD Pipeline / Build Docker Images (svc-forms) (push) Has been cancelled
CI/CD Pipeline / Build Docker Images (svc-hmrc) (push) Has been cancelled
CI/CD Pipeline / Build Docker Images (svc-ingestion) (push) Has been cancelled
CI/CD Pipeline / Build Docker Images (svc-kg) (push) Has been cancelled
CI/CD Pipeline / Build Docker Images (svc-normalize-map) (push) Has been cancelled
CI/CD Pipeline / Build Docker Images (svc-ocr) (push) Has been cancelled
CI/CD Pipeline / Build Docker Images (svc-rag-indexer) (push) Has been cancelled
CI/CD Pipeline / Build Docker Images (svc-rag-retriever) (push) Has been cancelled
CI/CD Pipeline / Build Docker Images (svc-reason) (push) Has been cancelled
CI/CD Pipeline / Build Docker Images (svc-rpa) (push) Has been cancelled
CI/CD Pipeline / Build Docker Images (ui-review) (push) Has been cancelled
CI/CD Pipeline / Security Scanning (svc-coverage) (push) Has been cancelled
CI/CD Pipeline / Security Scanning (svc-extract) (push) Has been cancelled
CI/CD Pipeline / Security Scanning (svc-kg) (push) Has been cancelled
CI/CD Pipeline / Security Scanning (svc-rag-retriever) (push) Has been cancelled
CI/CD Pipeline / Security Scanning (ui-review) (push) Has been cancelled
CI/CD Pipeline / Generate SBOM (push) Has been cancelled
CI/CD Pipeline / Deploy to Staging (push) Has been cancelled
CI/CD Pipeline / Deploy to Production (push) Has been cancelled
CI/CD Pipeline / Notifications (push) Has been cancelled

This commit is contained in:
harkon
2025-10-11 11:42:43 +01:00
parent b324ff09ef
commit f0f7674b8d
52 changed files with 663 additions and 5224 deletions

.gitignore (vendored): 2 changes
View File

@@ -14,7 +14,7 @@ mappings.txt
 restore_by_prefix.sh
 restore_from_file_header.py
 guess_ext_and_rename.py
+harkon/
 # Byte-compiled / optimized / DLL files
 __pycache__/

View File

@@ -8,12 +8,13 @@ disable =
     C0114, # missing-module-docstring (optional)
     C0115, # missing-class-docstring (optional)
     C0116, # missing-function-docstring (optional)
+    W1203, # logging-fstring-interpolation (optional)
 [TYPECHECK]
 ignored-modules = pydantic, pydantic_settings
 [FORMAT]
-max-line-length = 100
+max-line-length = 120
 [DESIGN]
 max-args = 8

View File

@@ -69,7 +69,7 @@ networks-clean: ## Remove external Docker networks
 # Development lifecycle
 run: ## Start all services in development mode
 	@echo "🏃 Starting AI Tax Agent System..."
-	@./scripts/deploy-with-fixes.sh
+	@./scripts/deploy.sh
 run-simple: ## Start all services without fixes (original behavior)
 	@echo "🏃 Starting AI Tax Agent System (simple)..."
@@ -116,11 +116,11 @@ build-service: ## Build specific service (usage: make build-service SERVICE=svc-
 deploy-infra: networks ## Deploy only infrastructure services
 	@echo "🏗️ Deploying infrastructure services..."
 	@./scripts/generate-dev-certs.sh
-	@cd infra/compose && docker compose -f docker-compose.local.yml up -d traefik postgres redis authentik-db authentik-redis
+	@cd infra/compose && docker compose -f docker-compose.local.yml up -d ata-traefik ata-postgres ata-redis ata-authentik-db ata-authentik-redis
 	@echo "⏳ Waiting for databases..."
 	@sleep 15
 	@make fix-databases
-	@cd infra/compose && docker compose -f docker-compose.local.yml up -d authentik-server authentik-worker authentik-outpost vault neo4j qdrant minio prometheus grafana loki
+	@cd infra/compose && docker compose -f docker-compose.local.yml up -d ata-authentik-server ata-authentik-worker ata-authentik-outpost ata-vault ata-neo4j ata-qdrant ata-minio ata-prometheus ata-grafana ata-loki
 	@echo "✅ Infrastructure deployment complete"
 	@echo "⏳ Waiting for services to be ready..."
 	@sleep 30
@@ -128,7 +128,7 @@ deploy-infra: networks ## Deploy only infrastructure services
 deploy-services: ## Deploy only application services
 	@echo "🚀 Deploying application services..."
-	@cd infra/compose && docker compose -f docker-compose.local.yml up -d svc-ingestion svc-extract svc-forms svc-hmrc svc-kg svc-normalize-map svc-ocr svc-rag-indexer svc-rag-retriever svc-reason svc-rpa svc-firm-connectors ui-review unleash
+	@cd infra/compose && docker compose -f docker-compose.local.yml up -d ata-svc-ingestion ata-svc-extract ata-svc-forms ata-svc-hmrc ata-svc-kg ata-svc-normalize-map ata-svc-ocr ata-svc-rag-indexer ata-svc-rag-retriever ata-svc-reason ata-svc-rpa ata-svc-firm-connectors ata-ui-review ata-unleash
 	@echo "✅ Services deployment complete"
 # Development tools
@@ -236,7 +236,7 @@ deploy-monitoring-prod: ## Deploy monitoring stack (production)
 seed: ## Seed the system with initial data
 	@echo "🌱 Seeding system with initial data..."
 	@echo "📊 Creating Neo4j constraints and indexes..."
-	@docker exec neo4j cypher-shell -u neo4j -p $(NEO4J_PASSWORD) -f /var/lib/neo4j/import/schema.cypher 2>/dev/null || echo "Neo4j not ready"
+	@docker exec ata-neo4j cypher-shell -u neo4j -p $(NEO4J_PASSWORD) -f /var/lib/neo4j/import/schema.cypher 2>/dev/null || echo "Neo4j not ready"
 	@echo "🗂️ Creating Qdrant collections..."
 	@curl -X PUT "http://localhost:6333/collections/documents" -H "Content-Type: application/json" -d '{"vectors": {"size": 1536, "distance": "Cosine"}}' 2>/dev/null || echo "Qdrant not ready"
 	@echo "✅ Seeding complete"
@@ -264,12 +264,12 @@ status: ## Show status of all services
 health: ## Check health of all services
 	@echo "🏥 Health Check:"
 	@echo "🔗 Traefik: $$(curl -s -o /dev/null -w '%{http_code}' http://localhost:8080/ping || echo 'DOWN')"
-	@echo "🗄️ PostgreSQL: $$(docker exec postgres pg_isready -U postgres 2>/dev/null && echo 'UP' || echo 'DOWN')"
+	@echo "🗄️ PostgreSQL: $$(docker exec ata-postgres pg_isready -U postgres 2>/dev/null && echo 'UP' || echo 'DOWN')"
 	@echo "📊 Neo4j: $$(curl -s -o /dev/null -w '%{http_code}' http://localhost:7474 || echo 'DOWN')"
 	@echo "🔍 Qdrant: $$(curl -s -o /dev/null -w '%{http_code}' http://localhost:6333/health || echo 'DOWN')"
 	@echo "📦 MinIO: $$(curl -s -o /dev/null -w '%{http_code}' http://localhost:9000/minio/health/live || echo 'DOWN')"
 	@echo "🔐 Vault: $$(curl -s -o /dev/null -w '%{http_code}' http://localhost:8200/v1/sys/health || echo 'DOWN')"
-	@echo "🏃 Redis: $$(docker exec redis redis-cli ping 2>/dev/null || echo 'DOWN')"
+	@echo "🏃 Redis: $$(docker exec ata-redis redis-cli ping 2>/dev/null || echo 'DOWN')"
 	@echo "🔐 Authentik: $$(curl -s -k -o /dev/null -w '%{http_code}' https://auth.local || echo 'DOWN')"
 verify: ## Run comprehensive infrastructure verification
@@ -282,18 +282,18 @@ troubleshoot: ## Run comprehensive troubleshooting and fixes
 restart-authentik: ## Restart Authentik components in correct order
 	@echo "🔄 Restarting Authentik components..."
-	@cd infra/compose && docker compose -f docker-compose.local.yml stop authentik-server authentik-worker authentik-outpost
+	@cd infra/compose && docker compose -f docker-compose.local.yml stop ata-authentik-server ata-authentik-worker ata-authentik-outpost
 	@make fix-databases
-	@cd infra/compose && docker compose -f docker-compose.local.yml up -d authentik-server
+	@cd infra/compose && docker compose -f docker-compose.local.yml up -d ata-authentik-server
 	@sleep 15
-	@cd infra/compose && docker compose -f docker-compose.local.yml up -d authentik-worker authentik-outpost
+	@cd infra/compose && docker compose -f docker-compose.local.yml up -d ata-authentik-worker ata-authentik-outpost
 	@echo "✅ Authentik restart complete"
 restart-unleash: ## Restart Unleash with database fixes
 	@echo "🔄 Restarting Unleash..."
-	@cd infra/compose && docker compose -f docker-compose.local.yml stop unleash
+	@cd infra/compose && docker compose -f docker-compose.local.yml stop ata-unleash
 	@make fix-databases
-	@cd infra/compose && docker compose -f docker-compose.local.yml up -d unleash
+	@cd infra/compose && docker compose -f docker-compose.local.yml up -d ata-unleash
 	@echo "✅ Unleash restart complete"
 # Cleanup
@@ -320,13 +320,13 @@ shell: ## Open shell in specific service (usage: make shell SERVICE=svc-extract)
 	@docker exec -it $(SERVICE) /bin/bash
 db-shell: ## Open PostgreSQL shell
-	@docker exec -it postgres psql -U postgres -d tax_system
+	@docker exec -it ata-postgres psql -U postgres -d tax_system
 neo4j-shell: ## Open Neo4j shell
-	@docker exec -it neo4j cypher-shell -u neo4j -p $(NEO4J_PASSWORD)
+	@docker exec -it ata-neo4j cypher-shell -u neo4j -p $(NEO4J_PASSWORD)
 redis-shell: ## Open Redis shell
-	@docker exec -it redis redis-cli
+	@docker exec -it ata-redis redis-cli
 # Documentation
 docs: ## Generate documentation
@@ -361,9 +361,9 @@ load-test: ## Run load tests
 backup: ## Create backup of all data
 	@echo "💾 Creating backup..."
 	@mkdir -p backups/$$(date +%Y%m%d_%H%M%S)
-	@docker exec postgres pg_dump -U postgres tax_system > backups/$$(date +%Y%m%d_%H%M%S)/postgres.sql
+	@docker exec ata-postgres pg_dump -U postgres tax_system > backups/$$(date +%Y%m%d_%H%M%S)/postgres.sql
-	@docker exec neo4j neo4j-admin dump --database=neo4j --to=/tmp/neo4j.dump
+	@docker exec ata-neo4j neo4j-admin dump --database=neo4j --to=/tmp/neo4j.dump
-	@docker cp neo4j:/tmp/neo4j.dump backups/$$(date +%Y%m%d_%H%M%S)/
+	@docker cp ata-neo4j:/tmp/neo4j.dump backups/$$(date +%Y%m%d_%H%M%S)/
 	@echo "✅ Backup created in backups/ directory"
 restore: ## Restore from backup (usage: make restore BACKUP=20240101_120000)
@@ -374,9 +374,9 @@ restore: ## Restore from backup (usage: make restore BACKUP=20240101_120000)
 	@echo "📥 Restoring from backup $(BACKUP)..."
 	@echo "⚠️ This will overwrite existing data!"
 	@read -p "Are you sure? (y/N): " confirm && [ "$$confirm" = "y" ] || exit 1
-	@docker exec -i postgres psql -U postgres -d tax_system < backups/$(BACKUP)/postgres.sql
+	@docker exec -i ata-postgres psql -U postgres -d tax_system < backups/$(BACKUP)/postgres.sql
-	@docker cp backups/$(BACKUP)/neo4j.dump neo4j:/tmp/
+	@docker cp backups/$(BACKUP)/neo4j.dump ata-neo4j:/tmp/
-	@docker exec neo4j neo4j-admin load --database=neo4j --from=/tmp/neo4j.dump --force
+	@docker exec ata-neo4j neo4j-admin load --database=neo4j --from=/tmp/neo4j.dump --force
 	@echo "✅ Restore complete"
 # Environment variables

View File

@@ -237,7 +237,7 @@ async def get_document_info(
     try:
         # Check if document exists
-        ingestion_settings = cast(IngestionSettings, settings)
+        ingestion_settings = settings
         bucket_name = ingestion_settings.raw_documents_bucket
         object_key = f"tenants/{tenant_id}/raw/{doc_id}.pdf"
@@ -289,7 +289,7 @@ async def delete_document(
     try:
         # Delete from storage
-        ingestion_settings = cast(IngestionSettings, settings)
+        ingestion_settings = settings
         bucket_name = ingestion_settings.raw_documents_bucket
         object_key = f"tenants/{tenant_id}/raw/{doc_id}.pdf"
@@ -313,7 +313,7 @@ async def _validate_upload(file: UploadFile) -> None:
     """Validate uploaded file"""
     # Cast settings to the correct type
-    ingestion_settings = cast(IngestionSettings, settings)
+    ingestion_settings = settings
     # Check file size
     if file.size and file.size > ingestion_settings.max_file_size:

View File

@@ -1,9 +1,9 @@
 # Service-specific dependencies for svc_ingestion
 # File upload and processing
-aiofiles>=23.2.0
+aiofiles>=25.1.0
 # MIME type detection
 python-magic>=0.4.27
 # Image processing (for thumbnails) - lightweight
-Pillow>=10.1.0
+Pillow>=11.3.0

View File

@@ -99,13 +99,13 @@
 - [ ] Verify environment: `cat infra/environments/production/.env`
 - [ ] Deploy: `./infra/scripts/deploy.sh production infrastructure`
 - [ ] Wait for services: `sleep 30`
-- [ ] Check status: `docker ps | grep -E "vault|minio|postgres|neo4j|qdrant|redis|nats"`
+- [ ] Check status: `docker ps | grep -E "apa-vault|apa-minio|apa-postgres|apa-neo4j|apa-qdrant|apa-redis|apa-nats"`
 - [ ] Verify Vault: `curl https://vault.harkon.co.uk/v1/sys/health`
 - [ ] Verify MinIO: `curl https://minio-api.harkon.co.uk/minio/health/live`
-- [ ] Verify PostgreSQL: `docker exec postgres pg_isready`
+- [ ] Verify PostgreSQL: `docker exec apa-postgres pg_isready`
 - [ ] Verify Neo4j: `curl http://localhost:7474`
 - [ ] Verify Qdrant: `curl http://localhost:6333/health`
-- [ ] Verify Redis: `docker exec redis redis-cli ping`
+- [ ] Verify Redis: `docker exec apa-redis redis-cli ping`
 - [ ] Verify NATS: `docker logs nats | grep "Server is ready"`
 #### Initialize Vault
@@ -133,13 +133,13 @@
 #### Initialize Databases
 - [ ] PostgreSQL:
-  - [ ] Access: `docker exec -it postgres psql -U postgres`
+  - [ ] Access: `docker exec -it apa-postgres psql -U postgres`
   - [ ] Create databases: `CREATE DATABASE tax_system;`
   - [ ] Verify: `\l`
   - [ ] Exit: `\q`
 - [ ] Neo4j:
-  - [ ] Access: `docker exec -it neo4j cypher-shell -u neo4j -p <password>`
+  - [ ] Access: `docker exec -it apa-neo4j cypher-shell -u neo4j -p <password>`
   - [ ] Create constraints (if needed)
   - [ ] Exit: `:exit`
@@ -219,13 +219,13 @@ For each service that needs OAuth:
 ### Service Accessibility
 - [ ] Traefik Dashboard: `https://traefik.harkon.co.uk`
-- [ ] Authentik: `https://authentik.harkon.co.uk`
+- [ ] Authentik: `https://auth.harkon.co.uk`
 - [ ] Gitea: `https://gitea.harkon.co.uk`
 - [ ] Grafana: `https://grafana.harkon.co.uk`
 - [ ] Prometheus: `https://prometheus.harkon.co.uk`
 - [ ] Vault: `https://vault.harkon.co.uk`
 - [ ] MinIO: `https://minio.harkon.co.uk`
-- [ ] UI Review: `https://ui-review.harkon.co.uk`
+- [ ] UI Review: `https://app.harkon.co.uk`
 ### Health Checks
@@ -274,8 +274,8 @@ If deployment fails:
 ### Restore Data
-- [ ] PostgreSQL: `docker exec -i postgres psql -U postgres -d tax_system < backup.sql`
+- [ ] PostgreSQL: `docker exec -i apa-postgres psql -U postgres -d tax_system < backup.sql`
-- [ ] Neo4j: `docker exec neo4j neo4j-admin load --from=/backup/neo4j.dump`
+- [ ] Neo4j: `docker exec apa-neo4j neo4j-admin load --from=/backup/neo4j.dump`
 - [ ] MinIO: Restore from backup bucket
 - [ ] Vault: Restore from snapshot
@@ -320,4 +320,3 @@ If deployment fails:
 - Document any deviations
 - Note any issues encountered
 - Update runbooks based on experience

View File

@@ -1,4 +1,4 @@
-# Unified Infrastructure Deployment Plan
+# Isolated Stacks Deployment Plan
 ## Executive Summary
@@ -19,7 +19,7 @@ This plan outlines the strategy to host both the **AI Tax Agent application** an
 - **SSL**: Let's Encrypt via GoDaddy DNS challenge
 - **Exposed Subdomains**:
   - `traefik.harkon.co.uk`
-  - `authentik.harkon.co.uk`
+  - `auth.harkon.co.uk`
   - `gitea.harkon.co.uk`
   - `cloud.harkon.co.uk`
   - `portainer.harkon.co.uk`
@@ -61,48 +61,14 @@ This plan outlines the strategy to host both the **AI Tax Agent application** an
 - Company services need to remain stable
 - Application services need independent deployment/rollback
-## Recommended Architecture
-### Option A: Unified Traefik & Authentik (RECOMMENDED)
-**Pros**:
-- Single point of entry
-- Shared authentication across all services
-- Simplified SSL management
-- Cost-effective (one Traefik, one Authentik)
-**Cons**:
-- Application deployments could affect company services
-- Requires careful configuration management
-**Implementation**:
-```
-/opt/compose/
-├── traefik/        # Shared Traefik (existing)
-├── authentik/      # Shared Authentik (existing)
-├── company/        # Company services
-│   ├── gitea/
-│   ├── nextcloud/
-│   └── portainer/
-└── ai-tax-agent/   # Application services
-    ├── infrastructure/  # App-specific infra (Vault, MinIO, Neo4j, etc.)
-    └── services/        # Microservices
-```
-### Option B: Isolated Stacks
-**Pros**:
-- Complete isolation
-- Independent scaling
-- No cross-contamination
-**Cons**:
-- Duplicate Traefik/Authentik
-- More complex SSL management
-- Higher resource usage
-- Users need separate logins
-## Proposed Solution: Hybrid Approach
+# Decision: Keep Stacks Completely Separate
+We will deploy the company services and the AI Tax Agent as two fully isolated stacks, each with its own Traefik and Authentik. This maximizes blast-radius isolation and avoids naming and DNS conflicts across environments.
+Key implications:
+- Separate external networks and DNS namespaces per stack
+- Duplicate edge (Traefik) and IdP (Authentik), independent upgrades and rollbacks
+- Slightly higher resource usage in exchange for strong isolation
 ### Architecture Overview
@@ -136,18 +102,18 @@ This plan outlines the strategy to host both the **AI Tax Agent application** an
 └─────────┘ └─────────┘
 ```
-### Directory Structure
+### Directory Structure (per stack)
 ```
-/opt/compose/
+/opt/compose/<stack>/
-├── traefik/              # Shared reverse proxy
+├── traefik/              # Stack-local reverse proxy
 │   ├── compose.yaml
 │   ├── config/
 │   │   ├── traefik.yaml  # Static config
 │   │   ├── dynamic-company.yaml
 │   │   └── dynamic-app.yaml
 │   └── certs/
-├── authentik/            # Shared SSO
+├── authentik/            # Stack-local SSO
 │   ├── compose.yaml
 │   └── ...
 ├── company/              # Company services namespace
@@ -157,7 +123,7 @@ This plan outlines the strategy to host both the **AI Tax Agent application** an
 │   │   └── compose.yaml
 │   └── portainer/
 │       └── compose.yaml
-└── ai-tax-agent/         # Application namespace
+└── ai-tax-agent/         # Application namespace (if this is the app stack)
     ├── .env              # Production environment
     ├── infrastructure.yaml  # Vault, MinIO, Neo4j, Qdrant, etc.
     ├── services.yaml     # All microservices
@@ -166,32 +132,29 @@ This plan outlines the strategy to host both the **AI Tax Agent application** an
 ### Network Strategy
-**Shared Networks**:
-- `frontend` - For all services exposed via Traefik
-- `backend` - For internal service communication
-**Application-Specific Networks** (optional):
-- `ai-tax-agent-internal` - For app-only internal communication
+- Use stack-scoped network names to avoid collisions: `apa-frontend`, `apa-backend`.
+- Only attach services that must be public to `apa-frontend`.
+- Keep internal communication on `apa-backend`.
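For illustration, a minimal sketch of the stack-scoped networks described above; the `apa-frontend`/`apa-backend` names are taken from this plan, and the authoritative definitions live in the compose files:

```bash
# Create the stack-scoped external networks referenced above (safe to re-run).
docker network inspect apa-frontend >/dev/null 2>&1 || docker network create apa-frontend
docker network inspect apa-backend  >/dev/null 2>&1 || docker network create apa-backend

# The company stack keeps its own pair (e.g. frontend/backend), so the two stacks
# never share a Docker DNS namespace.
docker network ls --filter name=apa-
```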
 ### Domain Mapping
 **Company Services** (existing):
 - `traefik.harkon.co.uk` - Traefik dashboard
-- `authentik.harkon.co.uk` - Authentik SSO
+- `auth.harkon.co.uk` - Authentik SSO
 - `gitea.harkon.co.uk` - Git hosting
 - `cloud.harkon.co.uk` - Nextcloud
 - `portainer.harkon.co.uk` - Docker management
-**Application Services** (new):
+**Application Services** (app stack):
-- `app.harkon.co.uk` - Review UI
+- `review.<domain>` - Review UI
-- `api.harkon.co.uk` - API Gateway (all microservices)
+- `api.<domain>` - API Gateway (microservices via Traefik)
-- `vault.harkon.co.uk` - Vault UI (admin only)
+- `vault.<domain>` - Vault UI (admin only)
-- `minio.harkon.co.uk` - MinIO Console (admin only)
+- `minio.<domain>` - MinIO Console (admin only)
-- `neo4j.harkon.co.uk` - Neo4j Browser (admin only)
+- `neo4j.<domain>` - Neo4j Browser (admin only)
-- `qdrant.harkon.co.uk` - Qdrant UI (admin only)
+- `qdrant.<domain>` - Qdrant UI (admin only)
-- `grafana.harkon.co.uk` - Grafana (monitoring)
+- `grafana.<domain>` - Grafana (monitoring)
-- `prometheus.harkon.co.uk` - Prometheus (admin only)
+- `prometheus.<domain>` - Prometheus (admin only)
-- `loki.harkon.co.uk` - Loki (admin only)
+- `loki.<domain>` - Loki (admin only)
 ### Authentication Strategy
@@ -208,6 +171,12 @@ This plan outlines the strategy to host both the **AI Tax Agent application** an
 - `rate-limit` - Standard rate limiting
 - `api-rate-limit` - Stricter API rate limiting
+## Implementation Notes
+- infra/base/infrastructure.yaml now includes Traefik and Authentik in the infrastructure stack with stack-scoped networks and service names.
+- All infrastructure component service keys and container names use the `apa-` prefix to avoid DNS collisions on shared Docker hosts.
+- Traefik static and dynamic configs live under `infra/base/traefik/config/`.
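A quick, illustrative check that the prefix convention holds on a shared host; the only assumption is the `apa-` naming convention from the notes above:

```bash
# List only containers belonging to this stack.
docker ps --filter "name=apa-" --format "table {{.Names}}\t{{.Status}}"

# Flag any running containers without a stack prefix that could collide on Docker DNS.
docker ps --format '{{.Names}}' | grep -vE '^(apa-|ata-)' || true
```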
 ## Local Development Workflow
 ### Development Environment
@@ -342,4 +311,3 @@ Create three new compose files for production:
 3. Create production compose files
 4. Set up CI/CD pipeline for automated deployment
 5. Execute Phase 1 (Preparation)

View File

@@ -27,12 +27,12 @@ EOF
 ```bash
 # Copy production compose files
-scp infra/compose/production/infrastructure.yaml deploy@141.136.35.199:/opt/ai-tax-agent/compose/production/
+scp infra/base/infrastructure.yaml deploy@141.136.35.199:/opt/ai-tax-agent/compose/production/
-scp infra/compose/production/services.yaml deploy@141.136.35.199:/opt/ai-tax-agent/compose/production/
+scp infra/base/services.yaml deploy@141.136.35.199:/opt/ai-tax-agent/compose/production/
-scp infra/compose/production/monitoring.yaml deploy@141.136.35.199:/opt/ai-tax-agent/compose/production/
+scp infra/base/monitoring.yaml deploy@141.136.35.199:/opt/ai-tax-agent/compose/production/
 # Copy environment file
-scp infra/compose/.env.production deploy@141.136.35.199:/opt/ai-tax-agent/compose/.env.production
+scp infra/environments/production/.env deploy@141.136.35.199:/opt/ai-tax-agent/compose/.env
 # Copy monitoring configs
 scp infra/compose/prometheus/prometheus.yml deploy@141.136.35.199:/opt/ai-tax-agent/compose/prometheus/
@@ -123,17 +123,17 @@ ssh deploy@141.136.35.199 "rm ~/vault-keys.txt"
 ```bash
 # MinIO is ready immediately, access at:
-# https://minio-console.harkon.co.uk
+# https://minio.harkon.co.uk
 # Username: admin (from .env.production MINIO_ROOT_USER)
 # Password: <from .env.production MINIO_ROOT_PASSWORD>
 # Create required buckets
 ssh deploy@141.136.35.199 << 'EOF'
-docker exec minio mc alias set local http://localhost:9000 admin <MINIO_ROOT_PASSWORD>
+docker exec apa-minio mc alias set local http://localhost:9000 admin <MINIO_ROOT_PASSWORD>
-docker exec minio mc mb local/documents
+docker exec apa-minio mc mb local/documents
-docker exec minio mc mb local/processed
+docker exec apa-minio mc mb local/processed
-docker exec minio mc mb local/models
+docker exec apa-minio mc mb local/models
-docker exec minio mc mb local/temp
+docker exec apa-minio mc mb local/temp
 EOF
 ```
@@ -147,7 +147,7 @@ EOF
 # Verify connection
 ssh deploy@141.136.35.199 << 'EOF'
-docker exec neo4j cypher-shell -u neo4j -p <NEO4J_PASSWORD> "RETURN 'Connected' as status;"
+docker exec apa-neo4j cypher-shell -u neo4j -p <NEO4J_PASSWORD> "RETURN 'Connected' as status;"
 EOF
 ```
@@ -181,7 +181,7 @@ EOF
 ### Step 10: Configure Authentik OAuth for Grafana
-1. **Login to Authentik**: https://authentik.harkon.co.uk
+1. **Login to Authentik**: https://auth.harkon.co.uk
 2. **Create OAuth Provider**:
    - Applications → Providers → Create
    - Type: OAuth2/OpenID Provider
@@ -210,7 +210,7 @@ EOF
 # Restart Grafana
 cd /opt/ai-tax-agent
-docker compose -f compose/production/monitoring.yaml restart grafana
+docker compose -f compose/production/monitoring.yaml restart apa-grafana
 ```
 ### Step 11: Verify Deployment
@@ -375,4 +375,3 @@ For issues or questions:
 - Check logs: `./scripts/verify-deployment.sh`
 - Review documentation: `docs/DEPLOYMENT_CHECKLIST.md`
 - Contact: [Your support contact]

View File

@@ -1,541 +0,0 @@
# AI Tax Agent Infrastructure Deployment Guide
Complete guide for deploying AI Tax Agent infrastructure across all environments.
## Table of Contents
1. [Prerequisites](#prerequisites)
2. [Quick Start](#quick-start)
3. [Local Development](#local-development)
4. [Development Server](#development-server)
5. [Production Server](#production-server)
6. [Troubleshooting](#troubleshooting)
---
## Prerequisites
### Required Software
- Docker 24.0+ with Compose V2
- Git
- SSH access (for remote deployments)
- Domain with DNS access (for dev/prod)
### Required Accounts
- GoDaddy account (for DNS-01 challenge)
- Gitea account (for container registry)
- OpenAI/Anthropic API keys (optional)
### Network Requirements
- Ports 80, 443 open (for Traefik)
- Docker networks: `frontend`, `backend`
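If the shared networks do not exist yet, they can be created manually; a minimal sketch of what `./infra/scripts/setup-networks.sh` is assumed to do:

```bash
# Create the two shared Docker networks if they are missing (idempotent).
docker network inspect frontend >/dev/null 2>&1 || docker network create frontend
docker network inspect backend  >/dev/null 2>&1 || docker network create backend
```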
---
## Quick Start
### 1. Clone Repository
```bash
git clone <repository-url>
cd ai-tax-agent
```
### 2. Choose Environment
```bash
# Local development
export ENV=local
# Development server
export ENV=development
# Production server
export ENV=production
```
### 3. Setup Environment File
```bash
# Copy template
cp infra/environments/$ENV/.env.example infra/environments/$ENV/.env
# Edit configuration
vim infra/environments/$ENV/.env
```
### 4. Generate Secrets (Dev/Prod only)
```bash
./scripts/generate-production-secrets.sh
```
### 5. Deploy
```bash
# Setup networks
./infra/scripts/setup-networks.sh
# Deploy all services
./infra/scripts/deploy.sh $ENV all
```
---
## Local Development
### Setup
1. **Create environment file**:
```bash
cp infra/environments/local/.env.example infra/environments/local/.env
```
2. **Edit configuration**:
```bash
vim infra/environments/local/.env
```
Key settings for local:
```env
DOMAIN=localhost
POSTGRES_PASSWORD=postgres
MINIO_ROOT_PASSWORD=minioadmin
GRAFANA_PASSWORD=admin
```
3. **Generate self-signed certificates** (optional):
```bash
./scripts/generate-dev-certs.sh
```
### Deploy
```bash
# Setup networks
./infra/scripts/setup-networks.sh
# Deploy infrastructure
./infra/scripts/deploy.sh local infrastructure
# Deploy monitoring
./infra/scripts/deploy.sh local monitoring
# Deploy services
./infra/scripts/deploy.sh local services
```
### Access Services
- **Grafana**: http://localhost:3000 (admin/admin)
- **MinIO Console**: http://localhost:9093 (minioadmin/minioadmin)
- **Vault**: http://localhost:8200 (token: dev-root-token)
- **Traefik Dashboard**: http://localhost:8080
### Development Workflow
1. Make code changes
2. Build images: `./scripts/build-and-push-images.sh localhost:5000 latest local`
3. Restart services: `./infra/scripts/deploy.sh local services`
4. Test changes
5. Check logs: `docker compose -f infra/base/services.yaml --env-file infra/environments/local/.env logs -f`
---
## Development Server
### Prerequisites
- Server with Docker installed
- Domain: `dev.harkon.co.uk`
- GoDaddy API credentials
- SSH access to server
### Setup
1. **SSH to development server**:
```bash
ssh deploy@dev-server.harkon.co.uk
```
2. **Clone repository**:
```bash
cd /opt
git clone <repository-url> ai-tax-agent
cd ai-tax-agent
```
3. **Create environment file**:
```bash
cp infra/environments/development/.env.example infra/environments/development/.env
```
4. **Generate secrets**:
```bash
./scripts/generate-production-secrets.sh
```
5. **Edit environment file**:
```bash
vim infra/environments/development/.env
```
Update:
- `DOMAIN=dev.harkon.co.uk`
- `EMAIL=dev@harkon.co.uk`
- API keys
- Registry credentials
6. **Setup GoDaddy DNS**:
```bash
# Create Traefik provider file
vim infra/configs/traefik/.provider.env
```
Add:
```env
GODADDY_API_KEY=your-api-key
GODADDY_API_SECRET=your-api-secret
```
### Deploy
```bash
# Setup networks
./infra/scripts/setup-networks.sh
# Deploy infrastructure
./infra/scripts/deploy.sh development infrastructure
# Wait for services to be healthy
sleep 30
# Deploy monitoring
./infra/scripts/deploy.sh development monitoring
# Deploy services
./infra/scripts/deploy.sh development services
```
### Verify Deployment
```bash
# Check services
docker ps
# Check logs
docker compose -f infra/base/infrastructure.yaml --env-file infra/environments/development/.env logs -f
# Test endpoints
curl https://vault.dev.harkon.co.uk
curl https://grafana.dev.harkon.co.uk
```
### Access Services
- **Grafana**: https://grafana.dev.harkon.co.uk
- **MinIO**: https://minio.dev.harkon.co.uk
- **Vault**: https://vault.dev.harkon.co.uk
- **UI Review**: https://ui-review.dev.harkon.co.uk
---
## Production Server
### Prerequisites
- Production server (141.136.35.199)
- Domain: `harkon.co.uk`
- Existing Traefik, Authentik, Gitea
- SSH access as `deploy` user
### Pre-Deployment Checklist
- [ ] Backup existing data
- [ ] Test in development first
- [ ] Generate production secrets
- [ ] Update DNS records
- [ ] Configure Authentik OAuth providers
- [ ] Setup Gitea container registry
- [ ] Build and push Docker images
### Setup
1. **SSH to production server**:
```bash
ssh deploy@141.136.35.199
```
2. **Navigate to project**:
```bash
cd /opt/ai-tax-agent
git pull origin main
```
3. **Verify environment file**:
```bash
cat infra/environments/production/.env | grep DOMAIN
```
Should show:
```env
DOMAIN=harkon.co.uk
```
4. **Verify secrets are set**:
```bash
# Check all secrets are not CHANGE_ME
grep -i "CHANGE_ME" infra/environments/production/.env
```
Should return nothing.
### Deploy Infrastructure
```bash
# Setup networks (if not already created)
./infra/scripts/setup-networks.sh
# Deploy infrastructure services
./infra/scripts/deploy.sh production infrastructure
```
This deploys:
- Vault (secrets management)
- MinIO (object storage)
- PostgreSQL (relational database)
- Neo4j (graph database)
- Qdrant (vector database)
- Redis (cache)
- NATS (message queue)
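An optional spot check after the deploy, reusing the container names and local ports used elsewhere in this guide (adjust if your container names or exposed ports differ):

```bash
# Quick infrastructure health sweep (assumes containers named postgres and redis,
# and the local ports referenced in this guide).
curl -s -o /dev/null -w 'Vault:  %{http_code}\n' http://localhost:8200/v1/sys/health
docker exec postgres pg_isready -U postgres
curl -s -o /dev/null -w 'Neo4j:  %{http_code}\n' http://localhost:7474
curl -s -o /dev/null -w 'Qdrant: %{http_code}\n' http://localhost:6333/health
curl -s -o /dev/null -w 'MinIO:  %{http_code}\n' http://localhost:9000/minio/health/live
docker exec redis redis-cli ping
```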
### Deploy Monitoring
```bash
./infra/scripts/deploy.sh production monitoring
```
This deploys:
- Prometheus (metrics)
- Grafana (dashboards)
- Loki (logs)
- Promtail (log collector)
### Deploy Services
```bash
./infra/scripts/deploy.sh production services
```
This deploys all 14 microservices.
### Post-Deployment
1. **Verify all services are running**:
```bash
docker ps | grep ai-tax-agent
```
2. **Check health**:
```bash
curl https://vault.harkon.co.uk/v1/sys/health
curl https://minio-api.harkon.co.uk/minio/health/live
```
3. **Configure Authentik OAuth**:
- Create OAuth providers for each service
- Update environment variables with client secrets
- Restart services
4. **Initialize Vault**:
```bash
# Access Vault
docker exec -it vault sh
# Initialize (if first time)
vault operator init
# Unseal (if needed)
vault operator unseal
```
5. **Setup MinIO buckets**:
```bash
# Access MinIO console
# https://minio.harkon.co.uk
# Create buckets:
# - documents
# - embeddings
# - models
# - backups
```
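The same buckets can also be created from the CLI instead of the console; a sketch assuming the container is named `minio` and the root credentials are exported from the production `.env`:

```bash
# Create the required buckets via the MinIO client inside the container.
docker exec minio mc alias set local http://localhost:9000 "$MINIO_ROOT_USER" "$MINIO_ROOT_PASSWORD"
for bucket in documents embeddings models backups; do
  docker exec minio mc mb --ignore-existing "local/$bucket"
done
```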
### Access Services
All services available at `https://<service>.harkon.co.uk`:
- **UI Review**: https://ui-review.harkon.co.uk
- **Grafana**: https://grafana.harkon.co.uk
- **Prometheus**: https://prometheus.harkon.co.uk
- **Vault**: https://vault.harkon.co.uk
- **MinIO**: https://minio.harkon.co.uk
---
## Troubleshooting
### Services Not Starting
```bash
# Check logs
docker compose -f infra/base/infrastructure.yaml --env-file infra/environments/production/.env logs -f
# Check specific service
docker logs vault
# Check Docker daemon
sudo systemctl status docker
```
### Network Issues
```bash
# Check networks exist
docker network ls | grep -E "frontend|backend"
# Inspect network
docker network inspect frontend
# Recreate networks
docker network rm frontend backend
./infra/scripts/setup-networks.sh
```
### Traefik Routing Issues
```bash
# Check Traefik logs
docker logs traefik | grep -i error
# Check container labels
docker inspect vault | grep -A 20 Labels
# Check Traefik dashboard
https://traefik.harkon.co.uk/dashboard/
```
### Database Connection Issues
```bash
# Check PostgreSQL
docker exec -it postgres psql -U postgres -c "\l"
# Check Neo4j
docker exec -it neo4j cypher-shell -u neo4j -p $NEO4J_PASSWORD
# Check Redis
docker exec -it redis redis-cli ping
```
### Volume/Data Issues
```bash
# List volumes
docker volume ls
# Inspect volume
docker volume inspect postgres_data
# Backup volume
docker run --rm -v postgres_data:/data -v $(pwd):/backup alpine tar czf /backup/postgres_backup.tar.gz /data
```
### SSL Certificate Issues
```bash
# Check Traefik logs for ACME errors
docker logs traefik | grep -i acme
# Check GoDaddy credentials
cat infra/configs/traefik/.provider.env
# Force certificate renewal
docker exec traefik rm -rf /var/traefik/certs/acme.json
docker restart traefik
```
---
## Maintenance
### Update Services
```bash
# Pull latest code
git pull origin main
# Rebuild images
./scripts/build-and-push-images.sh gitea.harkon.co.uk v1.0.2 harkon
# Deploy updates
./infra/scripts/deploy.sh production services --pull
```
### Backup Data
```bash
# Backup all volumes
./scripts/backup-volumes.sh production
# Backup specific service
docker run --rm -v postgres_data:/data -v $(pwd):/backup alpine tar czf /backup/postgres_backup.tar.gz /data
```
### Scale Services
```bash
# Scale a service
docker compose -f infra/base/services.yaml --env-file infra/environments/production/.env up -d --scale svc-ingestion=3
```
### View Logs
```bash
# All services
docker compose -f infra/base/services.yaml --env-file infra/environments/production/.env logs -f
# Specific service
docker logs -f svc-ingestion
# With Loki (via Grafana)
https://grafana.harkon.co.uk/explore
```
---
## Security Best Practices
1. **Rotate secrets regularly** - Use `generate-production-secrets.sh`
2. **Use Authentik SSO** - Enable for all services
3. **Keep images updated** - Regular security patches
4. **Monitor logs** - Check for suspicious activity
5. **Backup regularly** - Automated daily backups
6. **Use strong passwords** - Minimum 32 characters
7. **Limit network exposure** - Only expose necessary ports
8. **Enable audit logging** - Track all access
---
## Support
For issues:
1. Check logs
2. Review documentation
3. Check Traefik dashboard
4. Verify environment variables
5. Test in development first

View File

@@ -1,415 +0,0 @@
# AI Tax Agent Infrastructure - Final Structure
## Overview
The infrastructure is organized into two main categories:
1. **External Services** - Production-only services deployed individually
2. **Application Infrastructure** - Multi-environment services for the application
---
## Directory Structure
```
ai-tax-agent/
├── infra/
│ ├── compose/ # External services (production)
│ │ ├── traefik/ # Reverse proxy
│ │ │ ├── compose.yaml
│ │ │ ├── config/ # Traefik configuration (source of truth)
│ │ │ ├── certs/
│ │ │ └── .provider.env
│ │ ├── authentik/ # SSO provider
│ │ │ ├── compose.yaml
│ │ │ ├── .env
│ │ │ ├── media/
│ │ │ └── custom-templates/
│ │ ├── gitea/ # Git + Container Registry
│ │ │ ├── compose.yaml
│ │ │ └── .env
│ │ ├── nextcloud/ # File storage
│ │ │ └── compose.yaml
│ │ ├── portainer/ # Docker management
│ │ │ └── docker-compose.yaml
│ │ ├── docker-compose.local.yml # Local dev (all-in-one)
│ │ ├── docker-compose.backend.yml # Backend services
│ │ └── README.md
│ │
│ ├── base/ # Application infrastructure (multi-env)
│ │ ├── infrastructure.yaml # Core services (Vault, MinIO, DBs, etc.)
│ │ ├── services.yaml # Application microservices (14 services)
│ │ └── monitoring.yaml # Monitoring stack (Prometheus, Grafana, Loki)
│ │
│ ├── environments/ # Environment-specific configs
│ │ ├── local/
│ │ │ ├── .env.example
│ │ │ └── .env # Local development config
│ │ ├── development/
│ │ │ ├── .env.example
│ │ │ └── .env # Development server config
│ │ └── production/
│ │ ├── .env.example
│ │ └── .env # Production server config
│ │
│ ├── configs/ # Application service configs
│ │ ├── traefik/
│ │ │ └── app-middlewares.yml # App-specific Traefik middlewares
│ │ ├── authentik/
│ │ │ └── bootstrap.yaml # App-specific Authentik bootstrap
│ │ ├── grafana/
│ │ │ ├── dashboards/
│ │ │ └── provisioning/
│ │ ├── prometheus/
│ │ │ └── prometheus.yml
│ │ ├── loki/
│ │ │ └── loki-config.yml
│ │ └── vault/
│ │ └── config/
│ │
│ ├── docker/ # Dockerfile templates
│ │ ├── base-runtime.Dockerfile
│ │ ├── base-ml.Dockerfile
│ │ └── Dockerfile.ml-service.template
│ │
│ ├── certs/ # SSL certificates
│ │ ├── local/
│ │ ├── development/
│ │ └── production/
│ │
│ ├── scripts/ # Infrastructure deployment scripts
│ │ ├── deploy.sh # Deploy application infrastructure
│ │ ├── setup-networks.sh # Create Docker networks
│ │ └── reorganize-structure.sh
│ │
│ ├── README.md # Main infrastructure docs
│ ├── QUICK_START.md # Quick start guide
│ ├── DEPLOYMENT_GUIDE.md # Complete deployment guide
│ ├── MIGRATION_GUIDE.md # Migration from old structure
│ ├── STRUCTURE_OVERVIEW.md # Architecture overview
│ ├── STRUCTURE_CLEANUP.md # Cleanup plan
│ └── FINAL_STRUCTURE.md # This file
├── scripts/ # Project-wide scripts
│ ├── deploy-external.sh # Deploy external services
│ ├── cleanup-infra-structure.sh # Cleanup and align structure
│ ├── build-and-push-images.sh # Build and push Docker images
│ ├── generate-secrets.sh # Generate secrets
│ └── ...
└── Makefile # Project commands
```
---
## Deployment Workflows
### 1. Local Development
```bash
# Option A: Use Makefile (recommended)
make bootstrap
make run
# Option B: Use compose directly
cd infra/compose
docker compose -f docker-compose.local.yml up -d
# Option C: Use new multi-env structure
cp infra/environments/local/.env.example infra/environments/local/.env
./infra/scripts/setup-networks.sh
./infra/scripts/deploy.sh local all
```
### 2. Production - External Services
Deploy individually on remote server:
```bash
# SSH to server
ssh deploy@141.136.35.199
# Deploy all external services
cd /opt/ai-tax-agent
./scripts/deploy-external.sh all
# Or deploy individually
cd /opt/ai-tax-agent/infra/compose/traefik
docker compose up -d
cd /opt/ai-tax-agent/infra/compose/authentik
docker compose up -d
cd /opt/ai-tax-agent/infra/compose/gitea
docker compose up -d
```
### 3. Production - Application Infrastructure
```bash
# SSH to server
ssh deploy@141.136.35.199
cd /opt/ai-tax-agent
# Deploy infrastructure
./infra/scripts/deploy.sh production infrastructure
# Deploy monitoring
./infra/scripts/deploy.sh production monitoring
# Deploy services
./infra/scripts/deploy.sh production services
# Or use Makefile
make deploy-infra-prod
make deploy-monitoring-prod
make deploy-services-prod
```
---
## Makefile Commands
### Local Development
```bash
make bootstrap # Setup development environment
make run # Start all services (local)
make stop # Stop all services
make restart # Restart all services
make logs # Show logs from all services
make status # Show status of all services
make health # Check health of all services
```
### External Services (Production)
```bash
make deploy-external # Deploy all external services
make deploy-traefik # Deploy Traefik only
make deploy-authentik # Deploy Authentik only
make deploy-gitea # Deploy Gitea only
make deploy-nextcloud # Deploy Nextcloud only
make deploy-portainer # Deploy Portainer only
```
### Application Infrastructure (Multi-Environment)
```bash
# Local
make deploy-infra-local
make deploy-services-local
make deploy-monitoring-local
# Development
make deploy-infra-dev
make deploy-services-dev
make deploy-monitoring-dev
# Production
make deploy-infra-prod
make deploy-services-prod
make deploy-monitoring-prod
```
### Development Tools
```bash
make test # Run all tests
make lint # Run linting
make format # Format code
make build # Build Docker images
make clean # Clean up containers and volumes
```
---
## Configuration Management
### External Services
Each external service has its own configuration:
- **Traefik**: `infra/compose/traefik/config/` (source of truth)
- **Authentik**: `infra/compose/authentik/.env`
- **Gitea**: `infra/compose/gitea/.env`
### Application Infrastructure
Application-specific configurations:
- **Environment Variables**: `infra/environments/<env>/.env`
- **Traefik Middlewares**: `infra/configs/traefik/app-middlewares.yml`
- **Authentik Bootstrap**: `infra/configs/authentik/bootstrap.yaml`
- **Grafana Dashboards**: `infra/configs/grafana/dashboards/`
- **Prometheus Config**: `infra/configs/prometheus/prometheus.yml`
---
## Key Differences
### External Services vs Application Infrastructure
| Aspect | External Services | Application Infrastructure |
|--------|------------------|---------------------------|
| **Location** | `infra/compose/` | `infra/base/` + `infra/environments/` |
| **Deployment** | Individual compose files | Unified deployment script |
| **Environment** | Production only | Local, Dev, Prod |
| **Purpose** | Shared company services | AI Tax Agent application |
| **Examples** | Traefik, Authentik, Gitea | Vault, MinIO, Microservices |
---
## Networks
All services use two shared Docker networks:
- **frontend**: Public-facing services (connected to Traefik)
- **backend**: Internal services (databases, message queues)
Create networks:
```bash
docker network create frontend
docker network create backend
# Or use script
./infra/scripts/setup-networks.sh
# Or use Makefile
make networks
```
---
## Service Access
### Local Development
- **Grafana**: http://localhost:3000
- **MinIO**: http://localhost:9093
- **Vault**: http://localhost:8200
- **Traefik Dashboard**: http://localhost:8080
### Production
- **Traefik**: https://traefik.harkon.co.uk
- **Authentik**: https://authentik.harkon.co.uk
- **Gitea**: https://gitea.harkon.co.uk
- **Grafana**: https://grafana.harkon.co.uk
- **MinIO**: https://minio.harkon.co.uk
- **Vault**: https://vault.harkon.co.uk
- **UI Review**: https://ui-review.harkon.co.uk
---
## Best Practices
### 1. Configuration Management
- ✅ External service configs live with their compose files
- ✅ Application configs live in `infra/configs/`
- ✅ Environment-specific settings in `.env` files
- ✅ Never commit `.env` files (use `.env.example`)
### 2. Deployment
- ✅ Test in local first
- ✅ Deploy to development before production
- ✅ Deploy external services before application infrastructure
- ✅ Deploy infrastructure before services
### 3. Secrets Management
- ✅ Use `./scripts/generate-secrets.sh` for production
- ✅ Store secrets in `.env` files (gitignored)
- ✅ Use Vault for runtime secrets
- ✅ Rotate secrets regularly
### 4. Monitoring
- ✅ Check logs after deployment
- ✅ Verify health endpoints
- ✅ Monitor Grafana dashboards
- ✅ Set up alerts for production
---
## Troubleshooting
### Services Not Starting
```bash
# Check logs
docker compose logs -f <service>
# Check status
docker ps -a
# Check networks
docker network ls
docker network inspect frontend
```
### Configuration Issues
```bash
# Verify environment file
cat infra/environments/production/.env | grep DOMAIN
# Check compose file syntax
docker compose -f infra/base/infrastructure.yaml config
# Validate Traefik config
docker exec traefik traefik version
```
### Network Issues
```bash
# Recreate networks
docker network rm frontend backend
./infra/scripts/setup-networks.sh
# Check network connectivity
docker exec <service> ping <other-service>
```
---
## Migration from Old Structure
If you have the old structure, run:
```bash
./scripts/cleanup-infra-structure.sh
```
This will:
- Remove duplicate configurations
- Align Traefik configs
- Create app-specific middlewares
- Update .gitignore
- Create documentation
---
## Next Steps
1. ✅ Structure cleaned up and aligned
2. 📖 Read [QUICK_START.md](QUICK_START.md) for quick deployment
3. 📚 Read [DEPLOYMENT_GUIDE.md](DEPLOYMENT_GUIDE.md) for detailed instructions
4. 🧪 Test local deployment: `make run`
5. 🚀 Deploy to production: `make deploy-infra-prod`
---
## Support
For issues or questions:
1. Check logs: `make logs`
2. Check health: `make health`
3. Review documentation in `infra/`
4. Check Traefik dashboard for routing issues

View File

@@ -1,312 +0,0 @@
# Infrastructure Migration Guide
This guide helps you migrate from the old infrastructure structure to the new organized multi-environment setup.
## Old Structure vs New Structure
### Old Structure
```
infra/
├── compose/
│ ├── docker-compose.local.yml (1013 lines - everything)
│ ├── docker-compose.backend.yml (1014 lines - everything)
│ ├── authentik/compose.yaml
│ ├── gitea/compose.yaml
│ ├── nextcloud/compose.yaml
│ ├── portainer/docker-compose.yaml
│ └── traefik/compose.yaml
├── production/
│ ├── infrastructure.yaml
│ ├── services.yaml
│ └── monitoring.yaml
├── .env.production
└── various config folders
```
### New Structure
```
infra/
├── base/ # Shared compose files
│ ├── infrastructure.yaml
│ ├── services.yaml
│ ├── monitoring.yaml
│ └── external.yaml
├── environments/ # Environment-specific configs
│ ├── local/.env
│ ├── development/.env
│ └── production/.env
├── configs/ # Service configurations
│ ├── traefik/
│ ├── grafana/
│ ├── prometheus/
│ └── ...
└── scripts/
└── deploy.sh # Unified deployment script
```
## Migration Steps
### Step 1: Backup Current Setup
```bash
# Backup current environment files
cp infra/.env.production infra/.env.production.backup
cp infra/compose/.env infra/compose/.env.backup
# Backup compose files
tar -czf infra-backup-$(date +%Y%m%d).tar.gz infra/
```
### Step 2: Stop Current Services (if migrating live)
```bash
# Stop services (if running)
cd infra/compose
docker compose -f docker-compose.local.yml down
# Or for production
cd infra/production
docker compose -f infrastructure.yaml down
docker compose -f services.yaml down
docker compose -f monitoring.yaml down
```
### Step 3: Create Environment Files
```bash
# For local development
cp infra/environments/local/.env.example infra/environments/local/.env
vim infra/environments/local/.env
# For development server
cp infra/environments/development/.env.example infra/environments/development/.env
vim infra/environments/development/.env
# For production (copy from existing)
cp infra/.env.production infra/environments/production/.env
```
### Step 4: Move Configuration Files
```bash
# Move Traefik configs
cp -r infra/traefik/* infra/configs/traefik/
# Move Grafana configs
cp -r infra/grafana/* infra/configs/grafana/
# Move Prometheus configs
cp -r infra/prometheus/* infra/configs/prometheus/
# Move Loki configs
cp -r infra/loki/* infra/configs/loki/
# Move Vault configs
cp -r infra/vault/* infra/configs/vault/
# Move Authentik configs
cp -r infra/authentik/* infra/configs/authentik/
```
### Step 5: Update Volume Names (if needed)
If you want to preserve existing data, you have two options:
#### Option A: Keep Existing Volumes (Recommended)
The new compose files use the same volume names, so your data will be preserved automatically.
#### Option B: Rename Volumes
If you want environment-specific volume names:
```bash
# List current volumes
docker volume ls
# Rename volumes (example for production)
docker volume create prod_postgres_data
docker run --rm -v postgres_data:/from -v prod_postgres_data:/to alpine sh -c "cd /from && cp -av . /to"
# Repeat for each volume
```
### Step 6: Setup Networks
```bash
# Create Docker networks
./infra/scripts/setup-networks.sh
```
### Step 7: Deploy New Structure
```bash
# For local
./infra/scripts/deploy.sh local all
# For development
./infra/scripts/deploy.sh development all
# For production
./infra/scripts/deploy.sh production all
```
### Step 8: Verify Services
```bash
# Check running services
docker ps
# Check logs
docker compose -f infra/base/infrastructure.yaml --env-file infra/environments/production/.env logs -f
# Test endpoints
curl https://vault.harkon.co.uk
curl https://minio.harkon.co.uk
curl https://grafana.harkon.co.uk
```
## Handling External Services
If you have existing Traefik, Authentik, Gitea, Nextcloud, or Portainer:
### Option 1: Keep Existing (Recommended for Production)
Don't deploy `external.yaml`. Just ensure:
1. Networks are shared:
```yaml
networks:
  frontend:
    external: true
  backend:
    external: true
```
2. Services can discover each other via network
### Option 2: Migrate to New Structure
1. Stop existing services
2. Update their compose files to use new structure
3. Deploy via `external.yaml`
## Environment-Specific Differences
### Local Development
- Uses `localhost` or `*.local.harkon.co.uk`
- Self-signed SSL certificates
- Simple passwords
- Optional Authentik
- Traefik dashboard exposed on port 8080
### Development Server
- Uses `*.dev.harkon.co.uk`
- Let's Encrypt SSL via DNS-01 challenge
- Strong passwords (generated)
- Authentik SSO enabled
- Gitea container registry
### Production Server
- Uses `*.harkon.co.uk`
- Let's Encrypt SSL via DNS-01 challenge
- Strong passwords (generated)
- Authentik SSO enabled
- Gitea container registry
- No debug ports exposed
## Troubleshooting
### Issue: Services can't find each other
**Solution**: Ensure networks are created and services are on the correct networks
```bash
docker network ls
docker network inspect frontend
docker network inspect backend
```
### Issue: Volumes not found
**Solution**: Check volume names match
```bash
docker volume ls
docker compose -f infra/base/infrastructure.yaml --env-file infra/environments/production/.env config
```
### Issue: Environment variables not loaded
**Solution**: Check .env file exists and is in correct location
```bash
ls -la infra/environments/production/.env
cat infra/environments/production/.env | grep DOMAIN
```
### Issue: Traefik routing not working
**Solution**: Check labels and ensure Traefik can see containers
```bash
docker logs traefik | grep -i error
docker inspect <container> | grep -A 20 Labels
```
## Rollback Plan
If migration fails:
```bash
# Stop new services
./infra/scripts/deploy.sh production down
# Restore old structure
cd infra/compose
docker compose -f docker-compose.backend.yml up -d
# Or for production
cd infra/production
docker compose -f infrastructure.yaml up -d
docker compose -f services.yaml up -d
docker compose -f monitoring.yaml up -d
```
## Post-Migration Cleanup
After successful migration and verification:
```bash
# Remove old compose files (optional)
rm -rf infra/compose/docker-compose.*.yml
# Remove old production folder (optional)
rm -rf infra/production.old
# Remove backup files
rm infra/.env.production.backup
rm infra-backup-*.tar.gz
```
## Benefits of New Structure
**Multi-environment support** - Easy to deploy to local, dev, prod
**Cleaner organization** - Configs separated by purpose
**Unified deployment** - Single script for all environments
**Better security** - Environment-specific secrets
**Easier maintenance** - Clear separation of concerns
**Scalable** - Easy to add new environments or services
## Next Steps
1. Test in local environment first
2. Deploy to development server
3. Verify all services work
4. Deploy to production
5. Update documentation
6. Train team on new structure

View File

@@ -1,349 +0,0 @@
# Quick Start Guide
Get AI Tax Agent infrastructure running in 5 minutes!
## Prerequisites
- Docker 24.0+ with Compose V2
- Git
- 10GB free disk space
## Local Development (Fastest)
### 1. Create Environment File
```bash
cp infra/environments/local/.env.example infra/environments/local/.env
```
### 2. Setup Networks
```bash
./infra/scripts/setup-networks.sh
```
### 3. Deploy
```bash
./infra/scripts/deploy.sh local all
```
### 4. Access Services
- **Grafana**: http://localhost:3000 (admin/admin)
- **MinIO**: http://localhost:9093 (minioadmin/minioadmin)
- **Vault**: http://localhost:8200 (token: dev-root-token)
- **Traefik Dashboard**: http://localhost:8080
### 5. Build and Run Services
```bash
# Build images
./scripts/build-and-push-images.sh localhost:5000 latest local
# Services will auto-start via deploy script
```
---
## Development Server
### 1. SSH to Server
```bash
ssh deploy@dev-server.harkon.co.uk
cd /opt/ai-tax-agent
```
### 2. Create Environment File
```bash
cp infra/environments/development/.env.example infra/environments/development/.env
```
### 3. Generate Secrets
```bash
./scripts/generate-production-secrets.sh
```
### 4. Edit Environment
```bash
vim infra/environments/development/.env
```
Update:
- `DOMAIN=dev.harkon.co.uk`
- API keys
- Registry credentials
### 5. Deploy
```bash
./infra/scripts/setup-networks.sh
./infra/scripts/deploy.sh development all
```
### 6. Access
- https://grafana.dev.harkon.co.uk
- https://minio.dev.harkon.co.uk
- https://vault.dev.harkon.co.uk
---
## Production Server
### 1. SSH to Server
```bash
ssh deploy@141.136.35.199
cd /opt/ai-tax-agent
```
### 2. Verify Environment File
```bash
# Should already exist from previous setup
cat infra/environments/production/.env | grep DOMAIN
```
### 3. Deploy Infrastructure
```bash
./infra/scripts/setup-networks.sh
./infra/scripts/deploy.sh production infrastructure
```
### 4. Deploy Monitoring
```bash
./infra/scripts/deploy.sh production monitoring
```
### 5. Deploy Services
```bash
./infra/scripts/deploy.sh production services
```
### 6. Access
- https://grafana.harkon.co.uk
- https://minio.harkon.co.uk
- https://vault.harkon.co.uk
- https://ui-review.harkon.co.uk
---
## Common Commands
### Deploy Specific Stack
```bash
# Infrastructure only
./infra/scripts/deploy.sh production infrastructure
# Monitoring only
./infra/scripts/deploy.sh production monitoring
# Services only
./infra/scripts/deploy.sh production services
```
### Stop Services
```bash
./infra/scripts/deploy.sh production down
```
### View Logs
```bash
# All services
docker compose -f infra/base/infrastructure.yaml --env-file infra/environments/production/.env logs -f
# Specific service
docker logs -f vault
```
### Restart Service
```bash
docker restart vault
```
### Check Status
```bash
docker ps
```
---
## Troubleshooting
### Services Not Starting
```bash
# Check logs
docker compose -f infra/base/infrastructure.yaml --env-file infra/environments/production/.env logs
# Check specific service
docker logs vault
```
### Network Issues
```bash
# Verify networks exist
docker network ls | grep -E "frontend|backend"
# Recreate networks
docker network rm frontend backend
./infra/scripts/setup-networks.sh
```
### Environment Variables Not Loading
```bash
# Verify .env file exists
ls -la infra/environments/production/.env
# Check variables
cat infra/environments/production/.env | grep DOMAIN
```
---
## Next Steps
1. ✅ Infrastructure running
2. 📖 Read [DEPLOYMENT_GUIDE.md](DEPLOYMENT_GUIDE.md) for detailed instructions
3. 🔧 Configure Authentik OAuth providers
4. 🚀 Deploy application services
5. 📊 Setup Grafana dashboards
6. 🔐 Initialize Vault secrets
---
## Support
- **Documentation**: See `infra/README.md`
- **Deployment Guide**: See `infra/DEPLOYMENT_GUIDE.md`
- **Migration Guide**: See `infra/MIGRATION_GUIDE.md`
- **Structure Overview**: See `infra/STRUCTURE_OVERVIEW.md`
---
## Architecture Overview
```
┌─────────────────────────────────────────────────────────────┐
│ Traefik │
│ (Reverse Proxy) │
└─────────────────────────────────────────────────────────────┘
┌───────────────────┼───────────────────┐
│ │ │
┌───────▼────────┐ ┌──────▼──────┐ ┌────────▼────────┐
│ Authentik │ │ Monitoring │ │ Application │
│ (SSO) │ │ (Grafana) │ │ Services │
└────────────────┘ └──────────────┘ └─────────────────┘
┌───────────────────┼───────────────────┐
│ │ │
┌───────▼────────┐ ┌──────▼──────┐ ┌────────▼────────┐
│ PostgreSQL │ │ Neo4j │ │ Qdrant │
└────────────────┘ └──────────────┘ └─────────────────┘
│ │ │
┌───────▼────────┐ ┌──────▼──────┐ ┌────────▼────────┐
│ MinIO │ │ Redis │ │ NATS │
└────────────────┘ └──────────────┘ └─────────────────┘
```
---
## Environment Comparison
| Feature | Local | Development | Production |
|---------|-------|-------------|------------|
| Domain | localhost | dev.harkon.co.uk | harkon.co.uk |
| SSL | Self-signed | Let's Encrypt | Let's Encrypt |
| Auth | Optional | Authentik | Authentik |
| Passwords | Simple | Strong | Strong |
| Monitoring | Optional | Full | Full |
| Backups | No | Daily | Daily |
---
## Service Ports (Local)
| Service | Port | URL |
|---------|------|-----|
| Traefik Dashboard | 8080 | http://localhost:8080 |
| Grafana | 3000 | http://localhost:3000 |
| MinIO Console | 9093 | http://localhost:9093 |
| Vault | 8200 | http://localhost:8200 |
| PostgreSQL | 5432 | localhost:5432 |
| Neo4j | 7474 | http://localhost:7474 |
| Redis | 6379 | localhost:6379 |
| Qdrant | 6333 | http://localhost:6333 |
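To confirm the ports above are actually listening, a quick sketch using `nc` (adjust the list if you change any port mappings):

```bash
for port in 8080 3000 9093 8200 5432 7474 6379 6333; do
  if nc -z localhost "$port" 2>/dev/null; then
    echo "port $port: open"
  else
    echo "port $port: CLOSED"
  fi
done
```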
---
## Deployment Checklist
### Before Deployment
- [ ] Environment file created
- [ ] Secrets generated (dev/prod)
- [ ] Docker networks created
- [ ] DNS configured (dev/prod)
- [ ] GoDaddy API credentials set (dev/prod)
- [ ] Gitea registry configured (dev/prod)
### After Deployment
- [ ] All services running (`docker ps`)
- [ ] Services accessible via URLs
- [ ] Grafana dashboards loaded
- [ ] Vault initialized
- [ ] MinIO buckets created
- [ ] Authentik configured (dev/prod)
- [ ] Monitoring alerts configured
---
## Quick Reference
### Environment Files
- Local: `infra/environments/local/.env`
- Development: `infra/environments/development/.env`
- Production: `infra/environments/production/.env`
### Compose Files
- Infrastructure: `infra/base/infrastructure.yaml`
- Services: `infra/base/services.yaml`
- Monitoring: `infra/base/monitoring.yaml`
- External: `infra/base/external.yaml`
### Scripts
- Deploy: `./infra/scripts/deploy.sh <env> <stack>`
- Setup Networks: `./infra/scripts/setup-networks.sh`
- Reorganize: `./infra/scripts/reorganize-structure.sh`
---
**Ready to deploy? Start with local development!**
```bash
cp infra/environments/local/.env.example infra/environments/local/.env
./infra/scripts/setup-networks.sh
./infra/scripts/deploy.sh local all
```

@@ -1,243 +0,0 @@
# Infrastructure Structure Cleanup Plan
## Current Situation
We have two parallel structures that need to be aligned, plus configuration duplicated between them:
### 1. External Services (Production/Remote)
Located in `infra/compose/` - These are deployed individually on the remote server:
- **Traefik** - `infra/compose/traefik/`
- **Authentik** - `infra/compose/authentik/`
- **Gitea** - `infra/compose/gitea/`
- **Nextcloud** - `infra/compose/nextcloud/`
- **Portainer** - `infra/compose/portainer/`
### 2. Application Infrastructure (Multi-Environment)
Located in `infra/base/` and `infra/environments/`:
- Infrastructure services (Vault, MinIO, DBs, etc.)
- Application services (14 microservices)
- Monitoring stack (Prometheus, Grafana, Loki)
### 3. Configuration Duplication
- `infra/compose/traefik/config/` - Production Traefik config
- `infra/configs/traefik/` - Application Traefik config (copied)
- Similar duplication for other services
---
## Cleanup Strategy
### Phase 1: Consolidate Configurations
#### Traefik
- **Keep**: `infra/compose/traefik/` as the source of truth for production
- **Symlink**: `infra/configs/traefik/` → `../compose/traefik/` (see the sketch below)
- **Reason**: External service configs should live with their compose files
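A minimal sketch of that symlink step, run from the repository root (back up the old directory first if it still contains anything unique):

```bash
# Replace the duplicated config directory with a symlink to the production source of truth
rm -rf infra/configs/traefik
ln -s ../compose/traefik infra/configs/traefik
```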
#### Authentik
- **Keep**: `infra/compose/authentik/` for production
- **Keep**: `infra/configs/authentik/` for application-specific bootstrap
- **Reason**: Different purposes - one for service, one for app integration
#### Grafana/Prometheus/Loki
- **Keep**: `infra/configs/grafana/`, `infra/configs/prometheus/`, `infra/configs/loki/`
- **Reason**: These are application-specific, not external services
### Phase 2: Update References
#### Makefile
- Update paths to reference correct locations
- Add targets for external service deployment
- Separate local dev from production deployment
#### Scripts
- Update `scripts/deploy.sh` to handle external services
- Create `scripts/deploy-external.sh` for production external services
- Update `infra/scripts/deploy.sh` for application infrastructure
### Phase 3: Documentation
- Clear separation between:
- External services (production only)
- Application infrastructure (multi-environment)
- Development environment (local only)
---
## Proposed Final Structure
```
ai-tax-agent/
├── infra/
│ ├── compose/ # External services (production)
│ │ ├── traefik/
│ │ │ ├── compose.yaml # Traefik service definition
│ │ │ ├── config/ # Traefik configuration
│ │ │ ├── certs/ # SSL certificates
│ │ │ └── .provider.env # GoDaddy API credentials
│ │ ├── authentik/
│ │ │ ├── compose.yaml
│ │ │ ├── .env
│ │ │ ├── media/
│ │ │ └── custom-templates/
│ │ ├── gitea/
│ │ │ ├── compose.yaml
│ │ │ └── .env
│ │ ├── nextcloud/
│ │ │ └── compose.yaml
│ │ ├── portainer/
│ │ │ └── docker-compose.yaml
│ │ ├── docker-compose.local.yml # Local dev (all-in-one)
│ │ └── docker-compose.backend.yml # Backend services
│ │
│ ├── base/ # Application infrastructure (multi-env)
│ │ ├── infrastructure.yaml # Core infra services
│ │ ├── services.yaml # Application microservices
│ │ └── monitoring.yaml # Monitoring stack
│ │
│ ├── environments/ # Environment-specific configs
│ │ ├── local/
│ │ │ ├── .env.example
│ │ │ └── .env
│ │ ├── development/
│ │ │ ├── .env.example
│ │ │ └── .env
│ │ └── production/
│ │ ├── .env.example
│ │ └── .env
│ │
│ ├── configs/ # Application service configs
│ │ ├── authentik/ # App-specific Authentik bootstrap
│ │ ├── grafana/ # Grafana dashboards
│ │ ├── prometheus/ # Prometheus scrape configs
│ │ ├── loki/ # Loki config
│ │ └── vault/ # Vault config
│ │
│ └── scripts/ # Infrastructure deployment scripts
│ ├── deploy.sh # Deploy application infrastructure
│ ├── setup-networks.sh # Create Docker networks
│ └── reorganize-structure.sh
├── scripts/ # Project-wide scripts
│ ├── deploy-external.sh # Deploy external services (production)
│ ├── build-and-push-images.sh # Build and push Docker images
│ ├── generate-secrets.sh # Generate secrets
│ └── ...
└── Makefile # Project commands
```
---
## Deployment Workflows
### Local Development
```bash
# Use all-in-one compose file
make bootstrap
make run
# OR
cd infra/compose
docker compose -f docker-compose.local.yml up -d
```
### Production - External Services
```bash
# Deploy individually on remote server
cd /opt/ai-tax-agent/infra/compose/traefik
docker compose up -d
cd /opt/ai-tax-agent/infra/compose/authentik
docker compose up -d
cd /opt/ai-tax-agent/infra/compose/gitea
docker compose up -d
```
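The same steps as a loop, assuming each external service keeps its compose file in its own folder as listed above:

```bash
for svc in traefik authentik gitea nextcloud portainer; do
  (cd "/opt/ai-tax-agent/infra/compose/$svc" && docker compose up -d)
done
```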
### Production - Application Infrastructure
```bash
# Deploy application infrastructure
./infra/scripts/deploy.sh production infrastructure
./infra/scripts/deploy.sh production monitoring
./infra/scripts/deploy.sh production services
```
---
## Migration Steps
### Step 1: Align Traefik Configs
```bash
# Remove duplicate configs
rm -rf infra/configs/traefik/config/traefik-dynamic.yml
# Keep only app-specific middleware
# Move production configs to compose/traefik/config/
```
### Step 2: Update Makefile
- Add targets for external service deployment
- Update paths to reference correct locations
- Separate local dev from production
### Step 3: Update Scripts
- Create `scripts/deploy-external.sh` for production
- Update `infra/scripts/deploy.sh` for application infra
- Update all path references
### Step 4: Documentation
- Update README files
- Create deployment guides for each environment
- Document external vs application services
---
## Key Decisions
### 1. External Services Location
**Decision**: Keep in `infra/compose/` with individual folders
**Reason**: These are production-only, deployed separately, have their own configs
### 2. Application Infrastructure Location
**Decision**: Keep in `infra/base/` with environment-specific `.env` files
**Reason**: Multi-environment support, shared compose files
### 3. Configuration Management
**Decision**:
- External service configs live with their compose files
- Application configs live in `infra/configs/`
**Reason**: Clear separation of concerns
### 4. Makefile Targets
**Decision**:
- `make run` - Local development (all-in-one)
- `make deploy-external` - Production external services
- `make deploy-infra` - Application infrastructure
**Reason**: Clear separation of deployment targets
---
## Benefits
- **Clear Separation** - External vs application services
- **No Duplication** - Single source of truth for configs
- **Multi-Environment** - Easy to deploy to local/dev/prod
- **Maintainable** - Logical organization
- **Scalable** - Easy to add new services
- **Production-Ready** - Matches actual deployment
---
## Next Steps
1. Run cleanup script to align configurations
2. Update Makefile with new targets
3. Update deployment scripts
4. Test local deployment
5. Test production deployment
6. Update documentation

@@ -1,346 +0,0 @@
# Infrastructure Structure Overview
## New Multi-Environment Structure
```
infra/
├── README.md # Main infrastructure documentation
├── DEPLOYMENT_GUIDE.md # Complete deployment guide
├── MIGRATION_GUIDE.md # Migration from old structure
├── STRUCTURE_OVERVIEW.md # This file
├── base/ # Base compose files (environment-agnostic)
│ ├── infrastructure.yaml # Core infrastructure services
│ ├── services.yaml # Application microservices
│ ├── monitoring.yaml # Monitoring stack
│ └── external.yaml # External services (Traefik, Authentik, etc.)
├── environments/ # Environment-specific configurations
│ ├── local/ # Local development
│ │ ├── .env.example # Template
│ │ └── .env # Actual config (gitignored)
│ ├── development/ # Development server
│ │ ├── .env.example # Template
│ │ └── .env # Actual config (gitignored)
│ └── production/ # Production server
│ ├── .env.example # Template
│ └── .env # Actual config (gitignored)
├── configs/ # Service configuration files
│ ├── traefik/ # Traefik configs
│ │ ├── config/ # Dynamic configuration
│ │ │ ├── middlewares.yml
│ │ │ ├── routers.yml
│ │ │ └── services.yml
│ │ ├── traefik.yml # Static configuration
│ │ └── .provider.env # GoDaddy API credentials (gitignored)
│ ├── grafana/ # Grafana configs
│ │ ├── dashboards/ # Dashboard JSON files
│ │ └── provisioning/ # Datasources, dashboards
│ ├── prometheus/ # Prometheus config
│ │ └── prometheus.yml
│ ├── loki/ # Loki config
│ │ └── loki-config.yml
│ ├── promtail/ # Promtail config
│ │ └── promtail-config.yml
│ ├── vault/ # Vault config
│ │ └── config/
│ └── authentik/ # Authentik bootstrap
│ ├── bootstrap.yaml
│ ├── custom-templates/
│ └── media/
├── certs/ # SSL certificates (gitignored)
│ ├── local/ # Self-signed certs
│ ├── development/ # Let's Encrypt certs
│ └── production/ # Let's Encrypt certs
├── docker/ # Dockerfile templates
│ ├── base-runtime.Dockerfile # Base image for all services
│ ├── base-ml.Dockerfile # Base image for ML services
│ └── Dockerfile.ml-service.template
└── scripts/ # Deployment and utility scripts
├── deploy.sh # Main deployment script
├── setup-networks.sh # Create Docker networks
└── cleanup.sh # Cleanup script
```
## Base Compose Files
### infrastructure.yaml
Core infrastructure services needed by the application:
- **Vault** - Secrets management
- **MinIO** - Object storage (S3-compatible)
- **PostgreSQL** - Relational database
- **Neo4j** - Graph database
- **Qdrant** - Vector database
- **Redis** - Cache and session store
- **NATS** - Message queue (with JetStream)
### services.yaml
Application microservices (14 services):
- **svc-ingestion** - Document ingestion
- **svc-extract** - Data extraction
- **svc-kg** - Knowledge graph
- **svc-rag-indexer** - RAG indexing (ML)
- **svc-rag-retriever** - RAG retrieval (ML)
- **svc-forms** - Form processing
- **svc-hmrc** - HMRC integration
- **svc-ocr** - OCR processing (ML)
- **svc-rpa** - RPA automation
- **svc-normalize-map** - Data normalization
- **svc-reason** - Reasoning engine
- **svc-firm-connectors** - Firm integrations
- **svc-coverage** - Coverage analysis
- **ui-review** - Review UI (Next.js)
### monitoring.yaml
Monitoring and observability stack:
- **Prometheus** - Metrics collection
- **Grafana** - Dashboards and visualization
- **Loki** - Log aggregation
- **Promtail** - Log collection
### external.yaml (optional)
External services that may already exist:
- **Traefik** - Reverse proxy and load balancer
- **Authentik** - SSO and authentication
- **Gitea** - Git repository and container registry
- **Nextcloud** - File storage
- **Portainer** - Docker management UI
## Environment Configurations
### Local Development
- **Domain**: `localhost` or `*.local.harkon.co.uk`
- **SSL**: Self-signed certificates
- **Auth**: Optional (can disable Authentik)
- **Registry**: Local Docker registry or Gitea
- **Passwords**: Simple (postgres, admin, etc.)
- **Purpose**: Local development and testing
- **Traefik Dashboard**: Exposed on port 8080
### Development Server
- **Domain**: `*.dev.harkon.co.uk`
- **SSL**: Let's Encrypt (DNS-01 via GoDaddy)
- **Auth**: Authentik SSO enabled
- **Registry**: Gitea container registry
- **Passwords**: Strong (auto-generated)
- **Purpose**: Staging and integration testing
- **Traefik Dashboard**: Protected by Authentik
### Production Server
- **Domain**: `*.harkon.co.uk`
- **SSL**: Let's Encrypt (DNS-01 via GoDaddy)
- **Auth**: Authentik SSO enabled
- **Registry**: Gitea container registry
- **Passwords**: Strong (auto-generated)
- **Purpose**: Production deployment
- **Traefik Dashboard**: Protected by Authentik
- **Monitoring**: Full stack enabled
## Docker Networks
All environments use two networks:
### frontend
- Public-facing services
- Connected to Traefik
- Services: UI, Grafana, Vault, MinIO console
### backend
- Internal services
- Not directly accessible
- Services: Databases, message queues, internal APIs
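`setup-networks.sh` takes care of creating these; conceptually it boils down to two external networks (note that some compose files prefix the names, e.g. `apa-frontend`):

```bash
docker network create frontend || true   # public-facing, attached to Traefik
docker network create backend || true    # internal-only services
```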
## Volume Naming
Volumes are named consistently across environments:
- `postgres_data`
- `neo4j_data`
- `neo4j_logs`
- `qdrant_data`
- `minio_data`
- `vault_data`
- `redis_data`
- `nats_data`
- `prometheus_data`
- `grafana_data`
- `loki_data`
## Deployment Workflow
### 1. Setup Environment
```bash
cp infra/environments/production/.env.example infra/environments/production/.env
vim infra/environments/production/.env
```
### 2. Generate Secrets
```bash
./scripts/generate-production-secrets.sh
```
### 3. Setup Networks
```bash
./infra/scripts/setup-networks.sh
```
### 4. Deploy Infrastructure
```bash
./infra/scripts/deploy.sh production infrastructure
```
### 5. Deploy Monitoring
```bash
./infra/scripts/deploy.sh production monitoring
```
### 6. Deploy Services
```bash
./infra/scripts/deploy.sh production services
```
## Key Features
### ✅ Multi-Environment Support
Single codebase deploys to local, development, and production with environment-specific configurations.
### ✅ Modular Architecture
Services split into logical groups (infrastructure, monitoring, services, external) for independent deployment.
### ✅ Unified Deployment
Single `deploy.sh` script handles all environments and stacks.
### ✅ Environment Isolation
Each environment has its own `.env` file with appropriate secrets and configurations.
### ✅ Shared Configurations
Common service configs in `configs/` directory, referenced by all environments.
### ✅ Security Best Practices
- Secrets in gitignored `.env` files
- Strong password generation
- Authentik SSO integration
- SSL/TLS everywhere (Let's Encrypt)
### ✅ Easy Maintenance
- Clear directory structure
- Comprehensive documentation
- Migration guide from old structure
- Troubleshooting guides
## Service Access
### Local
- http://localhost:3000 - Grafana
- http://localhost:9093 - MinIO
- http://localhost:8200 - Vault
- http://localhost:8080 - Traefik Dashboard
### Development
- https://grafana.dev.harkon.co.uk
- https://minio.dev.harkon.co.uk
- https://vault.dev.harkon.co.uk
- https://ui-review.dev.harkon.co.uk
### Production
- https://grafana.harkon.co.uk
- https://minio.harkon.co.uk
- https://vault.harkon.co.uk
- https://ui-review.harkon.co.uk
## Configuration Management
### Environment Variables
All configuration via environment variables in `.env` files:
- Domain settings
- Database passwords
- API keys
- OAuth secrets
- Registry credentials
### Service Configs
Static configurations in `configs/` directory:
- Traefik routing rules
- Grafana dashboards
- Prometheus scrape configs
- Loki retention policies
### Secrets Management
- Development/Production: Vault
- Local: Environment variables
- Rotation: `generate-production-secrets.sh`
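For the Vault-backed environments, writing and reading a secret looks roughly like this; the mount point and path are illustrative, not a documented convention of this project:

```bash
export VAULT_ADDR=https://vault.harkon.co.uk   # or http://localhost:8200 locally
vault login                                     # token or OIDC, depending on setup
vault kv put secret/ai-tax-agent/postgres password="$(openssl rand -base64 32)"
vault kv get -field=password secret/ai-tax-agent/postgres
```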
## Monitoring and Observability
### Metrics (Prometheus)
- Service health
- Resource usage
- Request rates
- Error rates
### Logs (Loki)
- Centralized logging
- Query via Grafana
- Retention policies
- Log aggregation
### Dashboards (Grafana)
- Infrastructure overview
- Service metrics
- Application performance
- Business metrics
### Alerts
- Prometheus AlertManager
- Slack/Email notifications
- PagerDuty integration
## Backup Strategy
### What to Backup
- PostgreSQL database
- Neo4j graph data
- Vault secrets
- MinIO objects
- Qdrant vectors
- Grafana dashboards
### How to Backup
```bash
# Automated backup script
./scripts/backup-volumes.sh production
# Manual backup
docker run --rm -v postgres_data:/data -v $(pwd):/backup alpine tar czf /backup/postgres.tar.gz /data
```
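The matching restore for a volume backed up this way (illustrative; stop any containers using the volume first):

```bash
docker run --rm -v postgres_data:/data -v "$(pwd)":/backup alpine \
  sh -c "rm -rf /data/* && tar xzf /backup/postgres.tar.gz -C /"
```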
### Backup Schedule
- Daily: Databases
- Weekly: Full system
- Monthly: Archive
## Disaster Recovery
### Recovery Steps
1. Restore infrastructure
2. Restore volumes from backup
3. Deploy services
4. Verify functionality
5. Update DNS if needed
### RTO/RPO
- **RTO**: 4 hours (Recovery Time Objective)
- **RPO**: 24 hours (Recovery Point Objective)
## Next Steps
1. Review [DEPLOYMENT_GUIDE.md](DEPLOYMENT_GUIDE.md) for deployment instructions
2. Review [MIGRATION_GUIDE.md](MIGRATION_GUIDE.md) if migrating from old structure
3. Setup environment files
4. Deploy to local first
5. Test in development
6. Deploy to production

@@ -6,10 +6,10 @@
networks: networks:
frontend: frontend:
external: true external: true
name: frontend name: apa-frontend
backend: backend:
external: true external: true
name: backend name: apa-backend
volumes: volumes:
postgres_data: postgres_data:
@@ -22,10 +22,121 @@ volumes:
nats_data: nats_data:
services: services:
# Edge Gateway & SSO
apa-traefik:
image: docker.io/library/traefik:v3.5.1
container_name: apa-traefik
restart: unless-stopped
networks:
- frontend
- backend
ports:
- 80:80
- 443:443
- 8080:8080
volumes:
- /var/run/docker.sock:/var/run/docker.sock:ro
- ./traefik/config/:/etc/traefik/:ro
# Identity & SSO (Authentik)
apa-authentik-db:
image: postgres:15-alpine
container_name: apa-authentik-db
restart: unless-stopped
networks:
- backend
volumes:
- postgres_data:/var/lib/postgresql/data
environment:
POSTGRES_DB: authentik
POSTGRES_USER: authentik
POSTGRES_PASSWORD: ${AUTHENTIK_DB_PASSWORD}
healthcheck:
test: ["CMD-SHELL", "pg_isready -U authentik"]
interval: 30s
timeout: 10s
retries: 3
apa-authentik-redis:
image: redis:7-alpine
container_name: apa-authentik-redis
restart: unless-stopped
networks:
- backend
command: --save 60 1 --loglevel warning
healthcheck:
test: ["CMD-SHELL", "redis-cli ping | grep PONG"]
interval: 30s
timeout: 10s
retries: 3
apa-authentik-server:
image: ghcr.io/goauthentik/server:2025.8.3
container_name: apa-authentik-server
restart: unless-stopped
networks:
- backend
- frontend
command: server
environment:
AUTHENTIK_REDIS__HOST: apa-authentik-redis
AUTHENTIK_POSTGRESQL__HOST: apa-authentik-db
AUTHENTIK_POSTGRESQL__USER: authentik
AUTHENTIK_POSTGRESQL__NAME: authentik
AUTHENTIK_POSTGRESQL__PASSWORD: ${AUTHENTIK_DB_PASSWORD}
AUTHENTIK_SECRET_KEY: ${AUTHENTIK_SECRET_KEY}
AUTHENTIK_ERROR_REPORTING__ENABLED: false
depends_on:
- apa-authentik-db
- apa-authentik-redis
labels:
- "traefik.enable=true"
- "traefik.http.routers.authentik.rule=Host(`auth.${DOMAIN}`)"
- "traefik.http.routers.authentik.entrypoints=websecure"
- "traefik.http.routers.authentik.tls=true"
- "traefik.http.routers.authentik.tls.certresolver=godaddy"
- "traefik.http.services.authentik.loadbalancer.server.port=9000"
apa-authentik-worker:
image: ghcr.io/goauthentik/server:2025.8.3
container_name: apa-authentik-worker
restart: unless-stopped
networks:
- backend
command: worker
environment:
AUTHENTIK_REDIS__HOST: apa-authentik-redis
AUTHENTIK_POSTGRESQL__HOST: apa-authentik-db
AUTHENTIK_POSTGRESQL__USER: authentik
AUTHENTIK_POSTGRESQL__NAME: authentik
AUTHENTIK_POSTGRESQL__PASSWORD: ${AUTHENTIK_DB_PASSWORD}
AUTHENTIK_SECRET_KEY: ${AUTHENTIK_SECRET_KEY}
AUTHENTIK_ERROR_REPORTING__ENABLED: false
depends_on:
- apa-authentik-db
- apa-authentik-redis
apa-authentik-outpost:
image: ghcr.io/goauthentik/proxy:2025.8.3
container_name: apa-authentik-outpost
restart: unless-stopped
networks:
- backend
- frontend
environment:
AUTHENTIK_HOST: http://apa-authentik-server:9000
AUTHENTIK_INSECURE: true
AUTHENTIK_TOKEN: ${AUTHENTIK_OUTPOST_TOKEN}
AUTHENTIK_REDIS__HOST: apa-authentik-redis
AUTHENTIK_REDIS__PORT: 6379
depends_on:
- apa-authentik-server
- apa-authentik-redis
# Secrets Management # Secrets Management
vault: apa-vault:
image: hashicorp/vault:1.15 image: hashicorp/vault:1.15
container_name: vault container_name: apa-vault
restart: unless-stopped restart: unless-stopped
networks: networks:
- backend - backend
@@ -48,9 +159,9 @@ services:
- "traefik.http.services.vault.loadbalancer.server.port=8200" - "traefik.http.services.vault.loadbalancer.server.port=8200"
# Object Storage # Object Storage
minio: apa-minio:
image: minio/minio:RELEASE.2025-09-07T16-13-09Z image: minio/minio:RELEASE.2025-09-07T16-13-09Z
container_name: minio container_name: apa-minio
restart: unless-stopped restart: unless-stopped
networks: networks:
- backend - backend
@@ -85,9 +196,9 @@ services:
- "traefik.http.services.minio-console.loadbalancer.server.port=9093" - "traefik.http.services.minio-console.loadbalancer.server.port=9093"
# Vector Database # Vector Database
qdrant: apa-qdrant:
image: qdrant/qdrant:v1.7.4 image: qdrant/qdrant:v1.7.4
container_name: qdrant container_name: apa-qdrant
restart: unless-stopped restart: unless-stopped
networks: networks:
- backend - backend
@@ -108,9 +219,9 @@ services:
- "traefik.http.services.qdrant.loadbalancer.server.port=6333" - "traefik.http.services.qdrant.loadbalancer.server.port=6333"
# Knowledge Graph Database # Knowledge Graph Database
neo4j: apa-neo4j:
image: neo4j:5.15-community image: neo4j:5.15-community
container_name: neo4j container_name: apa-neo4j
restart: unless-stopped restart: unless-stopped
networks: networks:
- backend - backend
@@ -136,9 +247,9 @@ services:
- "traefik.http.services.neo4j.loadbalancer.server.port=7474" - "traefik.http.services.neo4j.loadbalancer.server.port=7474"
# Secure Client Data Store # Secure Client Data Store
postgres: apa-postgres:
image: postgres:15-alpine image: postgres:15-alpine
container_name: postgres container_name: apa-postgres
restart: unless-stopped restart: unless-stopped
networks: networks:
- backend - backend
@@ -169,9 +280,9 @@ services:
retries: 3 retries: 3
# Cache & Session Store # Cache & Session Store
redis: apa-redis:
image: redis:7-alpine image: redis:7-alpine
container_name: redis container_name: apa-redis
restart: unless-stopped restart: unless-stopped
networks: networks:
- backend - backend
@@ -190,9 +301,9 @@ services:
retries: 3 retries: 3
# Message Broker & Event Streaming # Message Broker & Event Streaming
nats: apa-nats:
image: nats:2.10-alpine image: nats:2.10-alpine
container_name: nats container_name: apa-nats
restart: unless-stopped restart: unless-stopped
networks: networks:
- backend - backend

@@ -5,10 +5,10 @@
networks: networks:
frontend: frontend:
external: true external: true
name: frontend name: apa-frontend
backend: backend:
external: true external: true
name: backend name: apa-backend
volumes: volumes:
prometheus_data: prometheus_data:
@@ -17,9 +17,9 @@ volumes:
services: services:
# Metrics Collection # Metrics Collection
prometheus: apa-prometheus:
image: prom/prometheus:v2.48.1 image: prom/prometheus:v2.48.1
container_name: prometheus container_name: apa-prometheus
restart: unless-stopped restart: unless-stopped
networks: networks:
- backend - backend
@@ -44,9 +44,9 @@ services:
- "traefik.http.services.prometheus.loadbalancer.server.port=9090" - "traefik.http.services.prometheus.loadbalancer.server.port=9090"
# Visualization & Dashboards # Visualization & Dashboards
grafana: apa-grafana:
image: grafana/grafana:10.2.3 image: grafana/grafana:10.2.3
container_name: grafana container_name: apa-grafana
restart: unless-stopped restart: unless-stopped
networks: networks:
- backend - backend
@@ -65,9 +65,9 @@ services:
GF_AUTH_GENERIC_OAUTH_CLIENT_ID: ${GRAFANA_OAUTH_CLIENT_ID} GF_AUTH_GENERIC_OAUTH_CLIENT_ID: ${GRAFANA_OAUTH_CLIENT_ID}
GF_AUTH_GENERIC_OAUTH_CLIENT_SECRET: ${GRAFANA_OAUTH_CLIENT_SECRET} GF_AUTH_GENERIC_OAUTH_CLIENT_SECRET: ${GRAFANA_OAUTH_CLIENT_SECRET}
GF_AUTH_GENERIC_OAUTH_SCOPES: openid profile email groups GF_AUTH_GENERIC_OAUTH_SCOPES: openid profile email groups
GF_AUTH_GENERIC_OAUTH_AUTH_URL: https://authentik.${DOMAIN}/application/o/authorize/ GF_AUTH_GENERIC_OAUTH_AUTH_URL: https://auth.${DOMAIN}/application/o/authorize/
GF_AUTH_GENERIC_OAUTH_TOKEN_URL: https://authentik.${DOMAIN}/application/o/token/ GF_AUTH_GENERIC_OAUTH_TOKEN_URL: https://auth.${DOMAIN}/application/o/token/
GF_AUTH_GENERIC_OAUTH_API_URL: https://authentik.${DOMAIN}/application/o/userinfo/ GF_AUTH_GENERIC_OAUTH_API_URL: https://auth.${DOMAIN}/application/o/userinfo/
GF_AUTH_GENERIC_OAUTH_AUTO_LOGIN: false GF_AUTH_GENERIC_OAUTH_AUTO_LOGIN: false
GF_AUTH_GENERIC_OAUTH_ALLOW_SIGN_UP: true GF_AUTH_GENERIC_OAUTH_ALLOW_SIGN_UP: true
GF_AUTH_GENERIC_OAUTH_ROLE_ATTRIBUTE_PATH: role GF_AUTH_GENERIC_OAUTH_ROLE_ATTRIBUTE_PATH: role
@@ -89,9 +89,9 @@ services:
- "traefik.http.services.grafana.loadbalancer.server.port=3000" - "traefik.http.services.grafana.loadbalancer.server.port=3000"
# Log Aggregation # Log Aggregation
loki: apa-loki:
image: grafana/loki:2.9.4 image: grafana/loki:2.9.4
container_name: loki container_name: apa-loki
restart: unless-stopped restart: unless-stopped
networks: networks:
- backend - backend
@@ -110,9 +110,9 @@ services:
- "traefik.http.services.loki.loadbalancer.server.port=3100" - "traefik.http.services.loki.loadbalancer.server.port=3100"
# Log Shipper (for Docker containers) # Log Shipper (for Docker containers)
promtail: apa-promtail:
image: grafana/promtail:2.9.4 image: grafana/promtail:2.9.4
container_name: promtail container_name: apa-promtail
restart: unless-stopped restart: unless-stopped
networks: networks:
- backend - backend
@@ -122,5 +122,4 @@ services:
- ./loki/promtail-config.yml:/etc/promtail/config.yml:ro - ./loki/promtail-config.yml:/etc/promtail/config.yml:ro
command: -config.file=/etc/promtail/config.yml command: -config.file=/etc/promtail/config.yml
depends_on: depends_on:
- loki - apa-loki

@@ -6,31 +6,31 @@
networks: networks:
frontend: frontend:
external: true external: true
name: frontend name: apa-frontend
backend: backend:
external: true external: true
name: backend name: apa-backend
services: services:
# Document Ingestion Service # Document Ingestion Service
svc-ingestion: apa-svc-ingestion:
image: gitea.harkon.co.uk/harkon/svc-ingestion:latest image: gitea.harkon.co.uk/harkon/svc-ingestion:latest
container_name: svc-ingestion container_name: apa-svc-ingestion
restart: unless-stopped restart: unless-stopped
networks: networks:
- backend - backend
- frontend - frontend
environment: environment:
- VAULT_ADDR=http://vault:8200 - VAULT_ADDR=http://apa-vault:8200
- VAULT_TOKEN=${VAULT_DEV_ROOT_TOKEN_ID} - VAULT_TOKEN=${VAULT_DEV_ROOT_TOKEN_ID}
- MINIO_ENDPOINT=minio:9092 - MINIO_ENDPOINT=apa-minio:9092
- MINIO_ACCESS_KEY=${MINIO_ROOT_USER} - MINIO_ACCESS_KEY=${MINIO_ROOT_USER}
- MINIO_SECRET_KEY=${MINIO_ROOT_PASSWORD} - MINIO_SECRET_KEY=${MINIO_ROOT_PASSWORD}
- POSTGRES_URL=postgresql://postgres:${POSTGRES_PASSWORD}@postgres:5432/tax_system - POSTGRES_URL=postgresql://postgres:${POSTGRES_PASSWORD}@apa-postgres:5432/tax_system
- NEO4J_URL=bolt://neo4j:7687 - NEO4J_URL=bolt://apa-neo4j:7687
- NEO4J_USER=neo4j - NEO4J_USER=neo4j
- NEO4J_PASSWORD=${NEO4J_PASSWORD} - NEO4J_PASSWORD=${NEO4J_PASSWORD}
- REDIS_URL=redis://redis:6379 - REDIS_URL=redis://apa-redis:6379
- EVENT_BUS_TYPE=${EVENT_BUS_TYPE} - EVENT_BUS_TYPE=${EVENT_BUS_TYPE}
- NATS_SERVERS=${NATS_SERVERS} - NATS_SERVERS=${NATS_SERVERS}
- NATS_STREAM_NAME=${NATS_STREAM_NAME} - NATS_STREAM_NAME=${NATS_STREAM_NAME}
@@ -45,24 +45,24 @@ services:
- "traefik.http.services.svc-ingestion.loadbalancer.server.port=8000" - "traefik.http.services.svc-ingestion.loadbalancer.server.port=8000"
# Data Extraction Service # Data Extraction Service
svc-extract: apa-svc-extract:
image: gitea.harkon.co.uk/harkon/svc-extract:latest image: gitea.harkon.co.uk/harkon/svc-extract:latest
container_name: svc-extract container_name: apa-svc-extract
restart: unless-stopped restart: unless-stopped
networks: networks:
- backend - backend
- frontend - frontend
environment: environment:
- VAULT_ADDR=http://vault:8200 - VAULT_ADDR=http://apa-vault:8200
- VAULT_TOKEN=${VAULT_DEV_ROOT_TOKEN_ID} - VAULT_TOKEN=${VAULT_DEV_ROOT_TOKEN_ID}
- MINIO_ENDPOINT=minio:9092 - MINIO_ENDPOINT=apa-minio:9092
- MINIO_ACCESS_KEY=${MINIO_ROOT_USER} - MINIO_ACCESS_KEY=${MINIO_ROOT_USER}
- MINIO_SECRET_KEY=${MINIO_ROOT_PASSWORD} - MINIO_SECRET_KEY=${MINIO_ROOT_PASSWORD}
- POSTGRES_URL=postgresql://postgres:${POSTGRES_PASSWORD}@postgres:5432/tax_system - POSTGRES_URL=postgresql://postgres:${POSTGRES_PASSWORD}@apa-postgres:5432/tax_system
- NEO4J_URL=bolt://neo4j:7687 - NEO4J_URL=bolt://apa-neo4j:7687
- NEO4J_USER=neo4j - NEO4J_USER=neo4j
- NEO4J_PASSWORD=${NEO4J_PASSWORD} - NEO4J_PASSWORD=${NEO4J_PASSWORD}
- REDIS_URL=redis://redis:6379 - REDIS_URL=redis://apa-redis:6379
- RAG_EMBEDDING_MODEL=${RAG_EMBEDDING_MODEL} - RAG_EMBEDDING_MODEL=${RAG_EMBEDDING_MODEL}
- EVENT_BUS_TYPE=${EVENT_BUS_TYPE} - EVENT_BUS_TYPE=${EVENT_BUS_TYPE}
- NATS_SERVERS=${NATS_SERVERS} - NATS_SERVERS=${NATS_SERVERS}
@@ -78,17 +78,17 @@ services:
- "traefik.http.services.svc-extract.loadbalancer.server.port=8000" - "traefik.http.services.svc-extract.loadbalancer.server.port=8000"
# Knowledge Graph Service # Knowledge Graph Service
svc-kg: apa-svc-kg:
image: gitea.harkon.co.uk/harkon/svc-kg:latest image: gitea.harkon.co.uk/harkon/svc-kg:latest
container_name: svc-kg container_name: apa-svc-kg
restart: unless-stopped restart: unless-stopped
networks: networks:
- backend - backend
- frontend - frontend
environment: environment:
- VAULT_ADDR=http://vault:8200 - VAULT_ADDR=http://apa-vault:8200
- VAULT_TOKEN=${VAULT_DEV_ROOT_TOKEN_ID} - VAULT_TOKEN=${VAULT_DEV_ROOT_TOKEN_ID}
- NEO4J_URI=bolt://neo4j:7687 - NEO4J_URI=bolt://apa-neo4j:7687
- NEO4J_USER=neo4j - NEO4J_USER=neo4j
- NEO4J_PASSWORD=${NEO4J_PASSWORD} - NEO4J_PASSWORD=${NEO4J_PASSWORD}
- EVENT_BUS_TYPE=${EVENT_BUS_TYPE} - EVENT_BUS_TYPE=${EVENT_BUS_TYPE}
@@ -105,18 +105,18 @@ services:
- "traefik.http.services.svc-kg.loadbalancer.server.port=8000" - "traefik.http.services.svc-kg.loadbalancer.server.port=8000"
# RAG Retrieval Service # RAG Retrieval Service
svc-rag-retriever: apa-svc-rag-retriever:
image: gitea.harkon.co.uk/harkon/svc-rag-retriever:latest image: gitea.harkon.co.uk/harkon/svc-rag-retriever:latest
container_name: svc-rag-retriever container_name: apa-svc-rag-retriever
restart: unless-stopped restart: unless-stopped
networks: networks:
- backend - backend
- frontend - frontend
environment: environment:
- VAULT_ADDR=http://vault:8200 - VAULT_ADDR=http://apa-vault:8200
- VAULT_TOKEN=${VAULT_DEV_ROOT_TOKEN_ID} - VAULT_TOKEN=${VAULT_DEV_ROOT_TOKEN_ID}
- QDRANT_URL=http://qdrant:6333 - QDRANT_URL=http://apa-qdrant:6333
- NEO4J_URI=bolt://neo4j:7687 - NEO4J_URI=bolt://apa-neo4j:7687
- NEO4J_USER=neo4j - NEO4J_USER=neo4j
- NEO4J_PASSWORD=${NEO4J_PASSWORD} - NEO4J_PASSWORD=${NEO4J_PASSWORD}
- RAG_EMBEDDING_MODEL=${RAG_EMBEDDING_MODEL} - RAG_EMBEDDING_MODEL=${RAG_EMBEDDING_MODEL}
@@ -135,25 +135,25 @@ services:
- "traefik.http.services.svc-rag-retriever.loadbalancer.server.port=8000" - "traefik.http.services.svc-rag-retriever.loadbalancer.server.port=8000"
# Forms Service # Forms Service
svc-forms: apa-svc-forms:
image: gitea.harkon.co.uk/harkon/svc-forms:latest image: gitea.harkon.co.uk/harkon/svc-forms:latest
container_name: svc-forms container_name: apa-svc-forms
restart: unless-stopped restart: unless-stopped
networks: networks:
- backend - backend
- frontend - frontend
environment: environment:
- VAULT_ADDR=http://vault:8200 - VAULT_ADDR=http://apa-vault:8200
- VAULT_TOKEN=${VAULT_DEV_ROOT_TOKEN_ID} - VAULT_TOKEN=${VAULT_DEV_ROOT_TOKEN_ID}
- POSTGRES_URL=postgresql://postgres:${POSTGRES_PASSWORD}@postgres:5432/tax_system - POSTGRES_URL=postgresql://postgres:${POSTGRES_PASSWORD}@apa-postgres:5432/tax_system
- NEO4J_URL=bolt://neo4j:7687 - NEO4J_URL=bolt://apa-neo4j:7687
- NEO4J_USER=neo4j - NEO4J_USER=neo4j
- NEO4J_PASSWORD=${NEO4J_PASSWORD} - NEO4J_PASSWORD=${NEO4J_PASSWORD}
- REDIS_URL=redis://redis:6379 - REDIS_URL=redis://apa-redis:6379
- MINIO_ENDPOINT=minio:9092 - MINIO_ENDPOINT=apa-minio:9092
- MINIO_ACCESS_KEY=${MINIO_ROOT_USER} - MINIO_ACCESS_KEY=${MINIO_ROOT_USER}
- MINIO_SECRET_KEY=${MINIO_ROOT_PASSWORD} - MINIO_SECRET_KEY=${MINIO_ROOT_PASSWORD}
- QDRANT_URL=http://qdrant:6333 - QDRANT_URL=http://apa-qdrant:6333
- EVENT_BUS_TYPE=${EVENT_BUS_TYPE} - EVENT_BUS_TYPE=${EVENT_BUS_TYPE}
- NATS_SERVERS=${NATS_SERVERS} - NATS_SERVERS=${NATS_SERVERS}
- NATS_STREAM_NAME=${NATS_STREAM_NAME} - NATS_STREAM_NAME=${NATS_STREAM_NAME}
@@ -168,25 +168,25 @@ services:
- "traefik.http.services.svc-forms.loadbalancer.server.port=8000" - "traefik.http.services.svc-forms.loadbalancer.server.port=8000"
# HMRC Integration Service # HMRC Integration Service
svc-hmrc: apa-svc-hmrc:
image: gitea.harkon.co.uk/harkon/svc-hmrc:latest image: gitea.harkon.co.uk/harkon/svc-hmrc:latest
container_name: svc-hmrc container_name: apa-svc-hmrc
restart: unless-stopped restart: unless-stopped
networks: networks:
- backend - backend
- frontend - frontend
environment: environment:
- VAULT_ADDR=http://vault:8200 - VAULT_ADDR=http://apa-vault:8200
- VAULT_TOKEN=${VAULT_DEV_ROOT_TOKEN_ID} - VAULT_TOKEN=${VAULT_DEV_ROOT_TOKEN_ID}
- POSTGRES_URL=postgresql://postgres:${POSTGRES_PASSWORD}@postgres:5432/tax_system - POSTGRES_URL=postgresql://postgres:${POSTGRES_PASSWORD}@apa-postgres:5432/tax_system
- NEO4J_URL=bolt://neo4j:7687 - NEO4J_URL=bolt://apa-neo4j:7687
- NEO4J_USER=neo4j - NEO4J_USER=neo4j
- NEO4J_PASSWORD=${NEO4J_PASSWORD} - NEO4J_PASSWORD=${NEO4J_PASSWORD}
- REDIS_URL=redis://redis:6379 - REDIS_URL=redis://apa-redis:6379
- MINIO_ENDPOINT=minio:9092 - MINIO_ENDPOINT=apa-minio:9092
- MINIO_ACCESS_KEY=${MINIO_ROOT_USER} - MINIO_ACCESS_KEY=${MINIO_ROOT_USER}
- MINIO_SECRET_KEY=${MINIO_ROOT_PASSWORD} - MINIO_SECRET_KEY=${MINIO_ROOT_PASSWORD}
- QDRANT_URL=http://qdrant:6333 - QDRANT_URL=http://apa-qdrant:6333
- HMRC_MTD_ITSA_MODE=${HMRC_MTD_ITSA_MODE} - HMRC_MTD_ITSA_MODE=${HMRC_MTD_ITSA_MODE}
- EVENT_BUS_TYPE=${EVENT_BUS_TYPE} - EVENT_BUS_TYPE=${EVENT_BUS_TYPE}
- NATS_SERVERS=${NATS_SERVERS} - NATS_SERVERS=${NATS_SERVERS}
@@ -202,25 +202,25 @@ services:
- "traefik.http.services.svc-hmrc.loadbalancer.server.port=8000" - "traefik.http.services.svc-hmrc.loadbalancer.server.port=8000"
# OCR Service # OCR Service
svc-ocr: apa-svc-ocr:
image: gitea.harkon.co.uk/harkon/svc-ocr:latest image: gitea.harkon.co.uk/harkon/svc-ocr:latest
container_name: svc-ocr container_name: apa-svc-ocr
restart: unless-stopped restart: unless-stopped
networks: networks:
- backend - backend
- frontend - frontend
environment: environment:
- VAULT_ADDR=http://vault:8200 - VAULT_ADDR=http://apa-vault:8200
- VAULT_TOKEN=${VAULT_DEV_ROOT_TOKEN_ID} - VAULT_TOKEN=${VAULT_DEV_ROOT_TOKEN_ID}
- POSTGRES_URL=postgresql://postgres:${POSTGRES_PASSWORD}@postgres:5432/tax_system - POSTGRES_URL=postgresql://postgres:${POSTGRES_PASSWORD}@apa-postgres:5432/tax_system
- NEO4J_URL=bolt://neo4j:7687 - NEO4J_URL=bolt://apa-neo4j:7687
- NEO4J_USER=neo4j - NEO4J_USER=neo4j
- NEO4J_PASSWORD=${NEO4J_PASSWORD} - NEO4J_PASSWORD=${NEO4J_PASSWORD}
- REDIS_URL=redis://redis:6379 - REDIS_URL=redis://apa-redis:6379
- MINIO_ENDPOINT=minio:9092 - MINIO_ENDPOINT=apa-minio:9092
- MINIO_ACCESS_KEY=${MINIO_ROOT_USER} - MINIO_ACCESS_KEY=${MINIO_ROOT_USER}
- MINIO_SECRET_KEY=${MINIO_ROOT_PASSWORD} - MINIO_SECRET_KEY=${MINIO_ROOT_PASSWORD}
- QDRANT_URL=http://qdrant:6333 - QDRANT_URL=http://apa-qdrant:6333
- EVENT_BUS_TYPE=${EVENT_BUS_TYPE} - EVENT_BUS_TYPE=${EVENT_BUS_TYPE}
- NATS_SERVERS=${NATS_SERVERS} - NATS_SERVERS=${NATS_SERVERS}
- NATS_STREAM_NAME=${NATS_STREAM_NAME} - NATS_STREAM_NAME=${NATS_STREAM_NAME}
@@ -235,25 +235,25 @@ services:
- "traefik.http.services.svc-ocr.loadbalancer.server.port=8000" - "traefik.http.services.svc-ocr.loadbalancer.server.port=8000"
# RAG Indexer Service # RAG Indexer Service
svc-rag-indexer: apa-svc-rag-indexer:
image: gitea.harkon.co.uk/harkon/svc-rag-indexer:latest image: gitea.harkon.co.uk/harkon/svc-rag-indexer:latest
container_name: svc-rag-indexer container_name: apa-svc-rag-indexer
restart: unless-stopped restart: unless-stopped
networks: networks:
- backend - backend
- frontend - frontend
environment: environment:
- VAULT_ADDR=http://vault:8200 - VAULT_ADDR=http://apa-vault:8200
- VAULT_TOKEN=${VAULT_DEV_ROOT_TOKEN_ID} - VAULT_TOKEN=${VAULT_DEV_ROOT_TOKEN_ID}
- POSTGRES_URL=postgresql://postgres:${POSTGRES_PASSWORD}@postgres:5432/tax_system - POSTGRES_URL=postgresql://postgres:${POSTGRES_PASSWORD}@apa-postgres:5432/tax_system
- NEO4J_URL=bolt://neo4j:7687 - NEO4J_URL=bolt://apa-neo4j:7687
- NEO4J_USER=neo4j - NEO4J_USER=neo4j
- NEO4J_PASSWORD=${NEO4J_PASSWORD} - NEO4J_PASSWORD=${NEO4J_PASSWORD}
- REDIS_URL=redis://redis:6379 - REDIS_URL=redis://apa-redis:6379
- MINIO_ENDPOINT=minio:9092 - MINIO_ENDPOINT=apa-minio:9092
- MINIO_ACCESS_KEY=${MINIO_ROOT_USER} - MINIO_ACCESS_KEY=${MINIO_ROOT_USER}
- MINIO_SECRET_KEY=${MINIO_ROOT_PASSWORD} - MINIO_SECRET_KEY=${MINIO_ROOT_PASSWORD}
- QDRANT_URL=http://qdrant:6333 - QDRANT_URL=http://apa-qdrant:6333
- EVENT_BUS_TYPE=${EVENT_BUS_TYPE} - EVENT_BUS_TYPE=${EVENT_BUS_TYPE}
- NATS_SERVERS=${NATS_SERVERS} - NATS_SERVERS=${NATS_SERVERS}
- NATS_STREAM_NAME=${NATS_STREAM_NAME} - NATS_STREAM_NAME=${NATS_STREAM_NAME}
@@ -268,25 +268,25 @@ services:
- "traefik.http.services.svc-rag-indexer.loadbalancer.server.port=8000" - "traefik.http.services.svc-rag-indexer.loadbalancer.server.port=8000"
# Reasoning Service # Reasoning Service
svc-reason: apa-svc-reason:
image: gitea.harkon.co.uk/harkon/svc-reason:latest image: gitea.harkon.co.uk/harkon/svc-reason:latest
container_name: svc-reason container_name: apa-svc-reason
restart: unless-stopped restart: unless-stopped
networks: networks:
- backend - backend
- frontend - frontend
environment: environment:
- VAULT_ADDR=http://vault:8200 - VAULT_ADDR=http://apa-vault:8200
- VAULT_TOKEN=${VAULT_DEV_ROOT_TOKEN_ID} - VAULT_TOKEN=${VAULT_DEV_ROOT_TOKEN_ID}
- POSTGRES_URL=postgresql://postgres:${POSTGRES_PASSWORD}@postgres:5432/tax_system - POSTGRES_URL=postgresql://postgres:${POSTGRES_PASSWORD}@apa-postgres:5432/tax_system
- NEO4J_URL=bolt://neo4j:7687 - NEO4J_URL=bolt://apa-neo4j:7687
- NEO4J_USER=neo4j - NEO4J_USER=neo4j
- NEO4J_PASSWORD=${NEO4J_PASSWORD} - NEO4J_PASSWORD=${NEO4J_PASSWORD}
- REDIS_URL=redis://redis:6379 - REDIS_URL=redis://apa-redis:6379
- MINIO_ENDPOINT=minio:9092 - MINIO_ENDPOINT=apa-minio:9092
- MINIO_ACCESS_KEY=${MINIO_ROOT_USER} - MINIO_ACCESS_KEY=${MINIO_ROOT_USER}
- MINIO_SECRET_KEY=${MINIO_ROOT_PASSWORD} - MINIO_SECRET_KEY=${MINIO_ROOT_PASSWORD}
- QDRANT_URL=http://qdrant:6333 - QDRANT_URL=http://apa-qdrant:6333
- EVENT_BUS_TYPE=${EVENT_BUS_TYPE} - EVENT_BUS_TYPE=${EVENT_BUS_TYPE}
- NATS_SERVERS=${NATS_SERVERS} - NATS_SERVERS=${NATS_SERVERS}
- NATS_STREAM_NAME=${NATS_STREAM_NAME} - NATS_STREAM_NAME=${NATS_STREAM_NAME}
@@ -301,25 +301,25 @@ services:
- "traefik.http.services.svc-reason.loadbalancer.server.port=8000" - "traefik.http.services.svc-reason.loadbalancer.server.port=8000"
# RPA Service # RPA Service
svc-rpa: apa-svc-rpa:
image: gitea.harkon.co.uk/harkon/svc-rpa:latest image: gitea.harkon.co.uk/harkon/svc-rpa:latest
container_name: svc-rpa container_name: apa-svc-rpa
restart: unless-stopped restart: unless-stopped
networks: networks:
- backend - backend
- frontend - frontend
environment: environment:
- VAULT_ADDR=http://vault:8200 - VAULT_ADDR=http://apa-vault:8200
- VAULT_TOKEN=${VAULT_DEV_ROOT_TOKEN_ID} - VAULT_TOKEN=${VAULT_DEV_ROOT_TOKEN_ID}
- POSTGRES_URL=postgresql://postgres:${POSTGRES_PASSWORD}@postgres:5432/tax_system - POSTGRES_URL=postgresql://postgres:${POSTGRES_PASSWORD}@apa-postgres:5432/tax_system
- NEO4J_URL=bolt://neo4j:7687 - NEO4J_URL=bolt://apa-neo4j:7687
- NEO4J_USER=neo4j - NEO4J_USER=neo4j
- NEO4J_PASSWORD=${NEO4J_PASSWORD} - NEO4J_PASSWORD=${NEO4J_PASSWORD}
- REDIS_URL=redis://redis:6379 - REDIS_URL=redis://apa-redis:6379
- MINIO_ENDPOINT=minio:9092 - MINIO_ENDPOINT=apa-minio:9092
- MINIO_ACCESS_KEY=${MINIO_ROOT_USER} - MINIO_ACCESS_KEY=${MINIO_ROOT_USER}
- MINIO_SECRET_KEY=${MINIO_ROOT_PASSWORD} - MINIO_SECRET_KEY=${MINIO_ROOT_PASSWORD}
- QDRANT_URL=http://qdrant:6333 - QDRANT_URL=http://apa-qdrant:6333
- EVENT_BUS_TYPE=${EVENT_BUS_TYPE} - EVENT_BUS_TYPE=${EVENT_BUS_TYPE}
- NATS_SERVERS=${NATS_SERVERS} - NATS_SERVERS=${NATS_SERVERS}
- NATS_STREAM_NAME=${NATS_STREAM_NAME} - NATS_STREAM_NAME=${NATS_STREAM_NAME}
@@ -334,25 +334,25 @@ services:
- "traefik.http.services.svc-rpa.loadbalancer.server.port=8000" - "traefik.http.services.svc-rpa.loadbalancer.server.port=8000"
# Normalize & Map Service # Normalize & Map Service
svc-normalize-map: apa-svc-normalize-map:
image: gitea.harkon.co.uk/harkon/svc-normalize-map:latest image: gitea.harkon.co.uk/harkon/svc-normalize-map:latest
container_name: svc-normalize-map container_name: apa-svc-normalize-map
restart: unless-stopped restart: unless-stopped
networks: networks:
- backend - backend
- frontend - frontend
environment: environment:
- VAULT_ADDR=http://vault:8200 - VAULT_ADDR=http://apa-vault:8200
- VAULT_TOKEN=${VAULT_DEV_ROOT_TOKEN_ID} - VAULT_TOKEN=${VAULT_DEV_ROOT_TOKEN_ID}
- POSTGRES_URL=postgresql://postgres:${POSTGRES_PASSWORD}@postgres:5432/tax_system - POSTGRES_URL=postgresql://postgres:${POSTGRES_PASSWORD}@apa-postgres:5432/tax_system
- NEO4J_URL=bolt://neo4j:7687 - NEO4J_URL=bolt://apa-neo4j:7687
- NEO4J_USER=neo4j - NEO4J_USER=neo4j
- NEO4J_PASSWORD=${NEO4J_PASSWORD} - NEO4J_PASSWORD=${NEO4J_PASSWORD}
- REDIS_URL=redis://redis:6379 - REDIS_URL=redis://apa-redis:6379
- MINIO_ENDPOINT=minio:9092 - MINIO_ENDPOINT=apa-minio:9092
- MINIO_ACCESS_KEY=${MINIO_ROOT_USER} - MINIO_ACCESS_KEY=${MINIO_ROOT_USER}
- MINIO_SECRET_KEY=${MINIO_ROOT_PASSWORD} - MINIO_SECRET_KEY=${MINIO_ROOT_PASSWORD}
- QDRANT_URL=http://qdrant:6333 - QDRANT_URL=http://apa-qdrant:6333
- EVENT_BUS_TYPE=${EVENT_BUS_TYPE} - EVENT_BUS_TYPE=${EVENT_BUS_TYPE}
- NATS_SERVERS=${NATS_SERVERS} - NATS_SERVERS=${NATS_SERVERS}
- NATS_STREAM_NAME=${NATS_STREAM_NAME} - NATS_STREAM_NAME=${NATS_STREAM_NAME}
@@ -367,25 +367,25 @@ services:
- "traefik.http.services.svc-normalize-map.loadbalancer.server.port=8000" - "traefik.http.services.svc-normalize-map.loadbalancer.server.port=8000"
# Coverage Service # Coverage Service
svc-coverage: apa-svc-coverage:
image: gitea.harkon.co.uk/harkon/svc-coverage:latest image: gitea.harkon.co.uk/harkon/svc-coverage:latest
container_name: svc-coverage container_name: apa-svc-coverage
restart: unless-stopped restart: unless-stopped
networks: networks:
- backend - backend
- frontend - frontend
environment: environment:
- VAULT_ADDR=http://vault:8200 - VAULT_ADDR=http://apa-vault:8200
- VAULT_TOKEN=${VAULT_DEV_ROOT_TOKEN_ID} - VAULT_TOKEN=${VAULT_DEV_ROOT_TOKEN_ID}
- POSTGRES_URL=postgresql://postgres:${POSTGRES_PASSWORD}@postgres:5432/tax_system - POSTGRES_URL=postgresql://postgres:${POSTGRES_PASSWORD}@apa-postgres:5432/tax_system
- NEO4J_URL=bolt://neo4j:7687 - NEO4J_URL=bolt://apa-neo4j:7687
- NEO4J_USER=neo4j - NEO4J_USER=neo4j
- NEO4J_PASSWORD=${NEO4J_PASSWORD} - NEO4J_PASSWORD=${NEO4J_PASSWORD}
- REDIS_URL=redis://redis:6379 - REDIS_URL=redis://apa-redis:6379
- MINIO_ENDPOINT=minio:9092 - MINIO_ENDPOINT=apa-minio:9092
- MINIO_ACCESS_KEY=${MINIO_ROOT_USER} - MINIO_ACCESS_KEY=${MINIO_ROOT_USER}
- MINIO_SECRET_KEY=${MINIO_ROOT_PASSWORD} - MINIO_SECRET_KEY=${MINIO_ROOT_PASSWORD}
- QDRANT_URL=http://qdrant:6333 - QDRANT_URL=http://apa-qdrant:6333
- EVENT_BUS_TYPE=${EVENT_BUS_TYPE} - EVENT_BUS_TYPE=${EVENT_BUS_TYPE}
- NATS_SERVERS=${NATS_SERVERS} - NATS_SERVERS=${NATS_SERVERS}
- NATS_STREAM_NAME=${NATS_STREAM_NAME} - NATS_STREAM_NAME=${NATS_STREAM_NAME}
@@ -400,25 +400,25 @@ services:
- "traefik.http.services.svc-coverage.loadbalancer.server.port=8000" - "traefik.http.services.svc-coverage.loadbalancer.server.port=8000"
# Firm Connectors Service # Firm Connectors Service
svc-firm-connectors: apa-svc-firm-connectors:
image: gitea.harkon.co.uk/harkon/svc-firm-connectors:latest image: gitea.harkon.co.uk/harkon/svc-firm-connectors:latest
container_name: svc-firm-connectors container_name: apa-svc-firm-connectors
restart: unless-stopped restart: unless-stopped
networks: networks:
- backend - backend
- frontend - frontend
environment: environment:
- VAULT_ADDR=http://vault:8200 - VAULT_ADDR=http://apa-vault:8200
- VAULT_TOKEN=${VAULT_DEV_ROOT_TOKEN_ID} - VAULT_TOKEN=${VAULT_DEV_ROOT_TOKEN_ID}
- POSTGRES_URL=postgresql://postgres:${POSTGRES_PASSWORD}@postgres:5432/tax_system - POSTGRES_URL=postgresql://postgres:${POSTGRES_PASSWORD}@apa-postgres:5432/tax_system
- NEO4J_URL=bolt://neo4j:7687 - NEO4J_URL=bolt://apa-neo4j:7687
- NEO4J_USER=neo4j - NEO4J_USER=neo4j
- NEO4J_PASSWORD=${NEO4J_PASSWORD} - NEO4J_PASSWORD=${NEO4J_PASSWORD}
- REDIS_URL=redis://redis:6379 - REDIS_URL=redis://apa-redis:6379
- MINIO_ENDPOINT=minio:9092 - MINIO_ENDPOINT=apa-minio:9092
- MINIO_ACCESS_KEY=${MINIO_ROOT_USER} - MINIO_ACCESS_KEY=${MINIO_ROOT_USER}
- MINIO_SECRET_KEY=${MINIO_ROOT_PASSWORD} - MINIO_SECRET_KEY=${MINIO_ROOT_PASSWORD}
- QDRANT_URL=http://qdrant:6333 - QDRANT_URL=http://apa-qdrant:6333
- EVENT_BUS_TYPE=${EVENT_BUS_TYPE} - EVENT_BUS_TYPE=${EVENT_BUS_TYPE}
- NATS_SERVERS=${NATS_SERVERS} - NATS_SERVERS=${NATS_SERVERS}
- NATS_STREAM_NAME=${NATS_STREAM_NAME} - NATS_STREAM_NAME=${NATS_STREAM_NAME}
@@ -433,9 +433,9 @@ services:
- "traefik.http.services.svc-firm-connectors.loadbalancer.server.port=8000" - "traefik.http.services.svc-firm-connectors.loadbalancer.server.port=8000"
# Review UI # Review UI
ui-review: apa-ui-review:
image: gitea.harkon.co.uk/harkon/ui-review:latest image: gitea.harkon.co.uk/harkon/ui-review:latest
container_name: ui-review container_name: apa-ui-review
restart: unless-stopped restart: unless-stopped
networks: networks:
- frontend - frontend

@@ -1,9 +1,8 @@
-# --> (Example) Securely expose apps using the Traefik proxy outpost...
 http:
   middlewares:
-    authentik:
+    authentik-forwardauth:
       forwardAuth:
-        address: http://authentik-server:9000/outpost.goauthentik.io/auth/traefik
+        address: "http://apa-authentik-outpost:9000/outpost.goauthentik.io/auth/traefik"
         trustForwardHeader: true
         authResponseHeaders:
           - X-authentik-username

@@ -0,0 +1,33 @@
# Static Traefik configuration (production)
entryPoints:
  web:
    address: ":80"
  websecure:
    address: ":443"

api:
  dashboard: true

providers:
  docker:
    endpoint: "unix:///var/run/docker.sock"
    exposedByDefault: false
    network: "apa-frontend"
  file:
    filename: "/etc/traefik/traefik-dynamic.yml"
    watch: true

# -- Configure your CertificateResolver here...
certificatesResolvers:
  godaddy:
    acme:
      email: info@harkon.co.uk
      storage: /var/traefik/certs/godaddy-acme.json
      caServer: "https://acme-v02.api.letsencrypt.org/directory"
      dnsChallenge:
        provider: godaddy
        resolvers:
          - 1.1.1.1:53
          - 8.8.8.8:53
          - 97.74.103.44:53
          - 173.201.71.44:53

@@ -1,127 +0,0 @@
---
services:
authentik-server:
image: ghcr.io/goauthentik/server:2025.8.1
container_name: authentik-server
command: server
environment:
- AUTHENTIK_REDIS__HOST=authentik-redis
- AUTHENTIK_POSTGRESQL__HOST=authentik-postgres
- AUTHENTIK_POSTGRESQL__USER=${POSTGRES_USER:-authentik}
- AUTHENTIK_POSTGRESQL__NAME=${POSTGRES_DB:-authentik}
- AUTHENTIK_POSTGRESQL__PASSWORD=${POSTGRES_PASSWORD:?error}
- AUTHENTIK_SECRET_KEY=${AUTHENTIK_SECRET_KEY:?error}
- AUTHENTIK_ERROR_REPORTING__ENABLED=${AUTHENTIK_ERROR_REPORTING:-false}
labels:
# (Optional) Enable Traefik integration for the Authentik Web UI. For more information
# about integrating other services with Traefik and Authentik, see the
# documentation at https://goauthentik.io/docs/outposts/integrations/traefik
# and the middleware example files in `docker-compose/traefik/config`.
- traefik.enable=true
- traefik.http.services.authentik.loadbalancer.server.port=9000
- traefik.http.services.authentik.loadbalancer.server.scheme=http
- traefik.http.routers.authentik.entrypoints=websecure
- traefik.http.routers.authentik.rule=Host(`authentik.harkon.co.uk`)
- traefik.http.routers.authentik.tls=true
- traefik.http.routers.authentik.tls.certresolver=godaddy
- traefik.http.routers.authentik.service=authentik
volumes:
- ./media:/media
- ./custom-templates:/templates
depends_on:
- authentik-postgres
- authentik-redis
networks:
- frontend
- backend
restart: unless-stopped
authentik-worker:
image: ghcr.io/goauthentik/server:2025.8.1
container_name: authentik-worker
command: worker
environment:
- AUTHENTIK_REDIS__HOST=authentik-redis
- AUTHENTIK_POSTGRESQL__HOST=authentik-postgres
- AUTHENTIK_POSTGRESQL__USER=${POSTGRES_USER:-authentik}
- AUTHENTIK_POSTGRESQL__NAME=${POSTGRES_DB:-authentik}
- AUTHENTIK_POSTGRESQL__PASSWORD=${POSTGRES_PASSWORD:?error}
- AUTHENTIK_SECRET_KEY=${AUTHENTIK_SECRET_KEY:?error}
- AUTHENTIK_ERROR_REPORTING__ENABLED=${AUTHENTIK_ERROR_REPORTING:-false}
# (Optional) Enable Email Sending
# Highly recommended to notify you about alerts and configuration issues.
# - AUTHENTIK_EMAIL__HOST=${EMAIL_HOST:?error}
# - AUTHENTIK_EMAIL__PORT=${EMAIL_PORT:-25}
# - AUTHENTIK_EMAIL__USERNAME=${EMAIL_USERNAME:?error}
# - AUTHENTIK_EMAIL__PASSWORD=${EMAIL_PASSWORD:?error}
# - AUTHENTIK_EMAIL__USE_TLS=${EMAIL_USE_TLS:-false}
# - AUTHENTIK_EMAIL__USE_SSL=${EMAIL_USE_SSL:-false}
# - AUTHENTIK_EMAIL__TIMEOUT=${EMAIL_TIMEOUT:-10}
# - AUTHENTIK_EMAIL__FROM=${EMAIL_FROM:?error}
# (Optional) See more for the docker socket integration here:
# https://goauthentik.io/docs/outposts/integrations/docker
user: root
volumes:
- /run/docker.sock:/run/docker.sock
- ./media:/media
- ./certs:/certs
- ./custom-templates:/templates
depends_on:
- authentik-postgres
- authentik-redis
networks:
- backend
restart: unless-stopped
authentik-redis:
image: docker.io/library/redis:8.2.1
container_name: authentik-redis
command: --save 60 1 --loglevel warning
healthcheck:
test: ["CMD-SHELL", "redis-cli ping | grep PONG"]
start_period: 20s
interval: 30s
retries: 5
timeout: 3s
volumes:
- redis_data:/data
networks:
- backend
restart: unless-stopped
authentik-postgres:
# (Optional) Add a PostgreSQL Database for Authentik
# Alternatively, you can host your PostgreSQL database externally, and
# change the connection settings in the `authentik-server` and
# `authentik-worker`.
image: docker.io/library/postgres:17.6
container_name: authentik-db
environment:
- POSTGRES_USER=${POSTGRES_USER:-authentik}
- POSTGRES_PASSWORD=${POSTGRES_PASSWORD:?error}
- POSTGRES_DB=${POSTGRES_DB:-authentik}
- TZ=${TZ:-UTC}
healthcheck:
test: ["CMD-SHELL", 'pg_isready -U "${POSTGRES_USER:-authentik}"']
start_period: 30s
interval: 10s
timeout: 10s
retries: 5
volumes:
- postgres_data:/var/lib/postgresql/data
networks:
- backend
restart: unless-stopped
volumes:
postgres_data:
driver: local
redis_data:
driver: local
networks:
frontend:
external: true
backend:
external: true

@@ -1,990 +0,0 @@
# FILE: infra/compose/docker-compose.local.yml
# Traefik (with Authentik ForwardAuth), Authentik, Vault, MinIO, Qdrant, Neo4j, Postgres, Redis, Prometheus/Grafana, Loki, Unleash, all services
networks:
frontend:
external: true
name: ai-tax-agent-frontend
backend:
external: true
name: ai-tax-agent-backend
volumes:
postgres_data:
neo4j_data:
neo4j_logs:
qdrant_data:
minio_data:
vault_data:
redis_data:
nats_data:
prometheus_data:
grafana_data:
loki_data:
authentik_data:
portainer-data:
services:
# Identity & SSO
authentik-db:
image: postgres:15-alpine
container_name: authentik-db
restart: unless-stopped
networks:
- backend
volumes:
- authentik_data:/var/lib/postgresql/data
environment:
POSTGRES_DB: authentik
POSTGRES_USER: authentik
POSTGRES_PASSWORD: ${AUTHENTIK_DB_PASSWORD:-authentik}
healthcheck:
test: ["CMD-SHELL", "pg_isready -U authentik"]
interval: 30s
timeout: 10s
retries: 3
authentik-redis:
image: redis:7-alpine
container_name: authentik-redis
restart: unless-stopped
networks:
- backend
command: --save 60 1 --loglevel warning
healthcheck:
test: ["CMD-SHELL", "redis-cli ping | grep PONG"]
interval: 30s
timeout: 10s
retries: 3
authentik-server:
image: ghcr.io/goauthentik/server:2025.8.3
container_name: authentik-server
restart: unless-stopped
networks:
- backend
- frontend
command: server
environment:
AUTHENTIK_REDIS__HOST: authentik-redis
AUTHENTIK_POSTGRESQL__HOST: authentik-db
AUTHENTIK_POSTGRESQL__USER: authentik
AUTHENTIK_POSTGRESQL__NAME: authentik
AUTHENTIK_POSTGRESQL__PASSWORD: ${AUTHENTIK_DB_PASSWORD:-authentik}
AUTHENTIK_SECRET_KEY: ${AUTHENTIK_SECRET_KEY:-changeme}
AUTHENTIK_ERROR_REPORTING__ENABLED: false
# Optional bootstrap for automated setup (create admin and API token)
AUTHENTIK_BOOTSTRAP_EMAIL: ${AUTHENTIK_BOOTSTRAP_EMAIL:-admin@local.lan}
AUTHENTIK_BOOTSTRAP_PASSWORD: ${AUTHENTIK_BOOTSTRAP_PASSWORD:-admin123}
AUTHENTIK_BOOTSTRAP_TOKEN: ${AUTHENTIK_BOOTSTRAP_TOKEN:-}
volumes:
- ./authentik/media:/media
- ./authentik/custom-templates:/templates
- ./authentik/bootstrap.yaml:/blueprints/bootstrap.yaml
depends_on:
- authentik-db
- authentik-redis
labels:
- "traefik.enable=true"
- "traefik.http.routers.authentik.rule=Host(`auth.${DOMAIN:-local.lan}`)"
- "traefik.http.routers.authentik.entrypoints=websecure"
- "traefik.http.routers.authentik.tls=true"
- "traefik.docker.network=ai-tax-agent-frontend"
- "traefik.http.services.authentik.loadbalancer.server.port=9000"
authentik-worker:
image: ghcr.io/goauthentik/server:2025.8.3
container_name: authentik-worker
restart: unless-stopped
networks:
- backend
command: worker
environment:
AUTHENTIK_REDIS__HOST: authentik-redis
AUTHENTIK_POSTGRESQL__HOST: authentik-db
AUTHENTIK_POSTGRESQL__USER: authentik
AUTHENTIK_POSTGRESQL__NAME: authentik
AUTHENTIK_POSTGRESQL__PASSWORD: ${AUTHENTIK_DB_PASSWORD:-authentik}
AUTHENTIK_SECRET_KEY: ${AUTHENTIK_SECRET_KEY:-changeme}
AUTHENTIK_ERROR_REPORTING__ENABLED: false
volumes:
- ./authentik/media:/media
- ./authentik/custom-templates:/templates
depends_on:
- authentik-db
- authentik-redis
authentik-outpost:
image: ghcr.io/goauthentik/proxy:2025.8.3
container_name: authentik-outpost
restart: unless-stopped
networks:
- backend
- frontend
environment:
AUTHENTIK_HOST: http://authentik-server:9000
AUTHENTIK_INSECURE: true
AUTHENTIK_TOKEN: ${AUTHENTIK_OUTPOST_TOKEN:-changeme}
AUTHENTIK_REDIS__HOST: authentik-redis
AUTHENTIK_REDIS__PORT: 6379
depends_on:
- authentik-server
- authentik-redis
# Secrets Management
vault:
image: hashicorp/vault:1.15
container_name: vault
restart: unless-stopped
networks:
- backend
ports:
- "8200:8200"
volumes:
- vault_data:/vault/data
- ./vault/config:/vault/config:ro
environment:
VAULT_DEV_ROOT_TOKEN_ID: ${VAULT_DEV_ROOT_TOKEN_ID:-root}
VAULT_DEV_LISTEN_ADDRESS: 0.0.0.0:8200
command: vault server -dev -dev-listen-address=0.0.0.0:8200
cap_add:
- IPC_LOCK
labels:
- "traefik.enable=true"
- "traefik.http.routers.vault.rule=Host(`vault.${DOMAIN:-local.lan}`)"
- "traefik.http.routers.vault.entrypoints=websecure"
- "traefik.http.routers.vault.tls=true"
- "traefik.http.routers.vault.middlewares=authentik-forwardauth@file"
- "traefik.http.services.vault.loadbalancer.server.port=8200"
# Object Storage
minio:
image: minio/minio:RELEASE.2025-09-07T16-13-09Z
container_name: minio
restart: unless-stopped
networks:
- backend
ports:
- "9092:9092"
- "9093:9093"
volumes:
- minio_data:/data
environment:
MINIO_ROOT_USER: ${MINIO_ROOT_USER:-minio}
MINIO_ROOT_PASSWORD: ${MINIO_ROOT_PASSWORD:-miniopass}
MINIO_BROWSER_REDIRECT_URL: https://minio.${DOMAIN:-local.lan}
command: server /data --address ":9092" --console-address ":9093"
healthcheck:
test: ["CMD", "mc", "--version"]
interval: 30s
timeout: 20s
retries: 3
labels:
- "traefik.enable=true"
- "traefik.http.routers.minio-api.rule=Host(`minio-api.${DOMAIN:-local.lan}`)"
- "traefik.http.routers.minio-api.entrypoints=websecure"
- "traefik.http.routers.minio-api.tls=true"
- "traefik.http.routers.minio-api.middlewares=authentik-forwardauth@file"
- "traefik.http.routers.minio-api.service=minio-api"
- "traefik.http.services.minio-api.loadbalancer.server.port=9092"
- "traefik.http.routers.minio-console.rule=Host(`minio.${DOMAIN:-local.lan}`)"
- "traefik.http.routers.minio-console.entrypoints=websecure"
- "traefik.http.routers.minio-console.tls=true"
- "traefik.http.routers.minio-console.middlewares=authentik-forwardauth@file"
- "traefik.http.routers.minio-console.service=minio-console"
- "traefik.http.services.minio-console.loadbalancer.server.port=9093"
# Vector Database
qdrant:
image: qdrant/qdrant:v1.7.4
container_name: qdrant
restart: unless-stopped
networks:
- backend
ports:
- "6333:6333"
- "6334:6334"
volumes:
- qdrant_data:/qdrant/storage
environment:
QDRANT__SERVICE__GRPC_PORT: ${QDRANT__SERVICE__GRPC_PORT:-6334}
QDRANT__SERVICE__HTTP_PORT: 6333
QDRANT__LOG_LEVEL: INFO
labels:
- "traefik.enable=true"
- "traefik.http.routers.qdrant.rule=Host(`qdrant.${DOMAIN:-local.lan}`)"
- "traefik.http.routers.qdrant.entrypoints=websecure"
- "traefik.http.routers.qdrant.tls=true"
- "traefik.http.routers.qdrant.middlewares=authentik-forwardauth@file"
- "traefik.http.services.qdrant.loadbalancer.server.port=6333"
# Knowledge Graph Database
neo4j:
image: neo4j:5.15-community
container_name: neo4j
restart: unless-stopped
networks:
- backend
ports:
- "7474:7474"
- "7687:7687"
volumes:
- neo4j_data:/data
- neo4j_logs:/logs
- ./neo4j/plugins:/plugins
environment:
NEO4J_AUTH: neo4j/${NEO4J_PASSWORD:-neo4jpass}
NEO4J_PLUGINS: '["apoc", "graph-data-science"]'
NEO4J_dbms_security_procedures_unrestricted: gds.*,apoc.*
NEO4J_dbms_security_procedures_allowlist: gds.*,apoc.*
NEO4J_apoc_export_file_enabled: true
NEO4J_apoc_import_file_enabled: true
NEO4J_apoc_import_file_use__neo4j__config: true
labels:
- "traefik.enable=true"
- "traefik.http.routers.neo4j.rule=Host(`neo4j.${DOMAIN:-local.lan}`)"
- "traefik.http.routers.neo4j.entrypoints=websecure"
- "traefik.http.routers.neo4j.tls=true"
- "traefik.http.routers.neo4j.middlewares=authentik-forwardauth@file"
- "traefik.http.services.neo4j.loadbalancer.server.port=7474"
# Secure Client Data Store
postgres:
image: postgres:15-alpine
container_name: postgres
restart: unless-stopped
networks:
- backend
ports:
- "5432:5432"
volumes:
- postgres_data:/var/lib/postgresql/data
- ./postgres/init:/docker-entrypoint-initdb.d
environment:
POSTGRES_DB: tax_system
POSTGRES_USER: postgres
POSTGRES_PASSWORD: ${POSTGRES_PASSWORD:-postgres}
POSTGRES_INITDB_ARGS: "--auth-host=scram-sha-256"
command: >
postgres
-c shared_preload_libraries=pg_stat_statements
-c pg_stat_statements.track=all
-c max_connections=200
-c shared_buffers=256MB
-c effective_cache_size=1GB
-c maintenance_work_mem=64MB
-c checkpoint_completion_target=0.9
-c wal_buffers=16MB
-c default_statistics_target=100
-c random_page_cost=1.1
-c effective_io_concurrency=200
healthcheck:
test: ["CMD-SHELL", "pg_isready -U postgres"]
interval: 30s
timeout: 10s
retries: 3
# Cache & Session Store
redis:
image: redis:7-alpine
container_name: redis
restart: unless-stopped
networks:
- backend
ports:
- "6379:6379"
volumes:
- redis_data:/data
command: >
redis-server
--appendonly yes
--appendfsync everysec
--maxmemory 512mb
--maxmemory-policy allkeys-lru
healthcheck:
test: ["CMD-SHELL", "redis-cli ping | grep PONG"]
interval: 30s
timeout: 10s
retries: 3
# Message Broker & Event Streaming
nats:
image: nats:2.10-alpine
container_name: nats
restart: unless-stopped
networks:
- backend
ports:
- "4222:4222" # NATS client connections
- "8222:8222" # HTTP monitoring
- "6222:6222" # Cluster routing (for future clustering)
volumes:
- nats_data:/data
command: >
--jetstream
--store_dir=/data
--http_port=8222
environment:
NATS_LOG_LEVEL: ${NATS_LOG_LEVEL:-info}
healthcheck:
test:
[
"CMD",
"wget",
"--no-verbose",
"--tries=1",
"--spider",
"http://localhost:8222/healthz",
]
interval: 30s
timeout: 10s
retries: 3
labels:
- "traefik.enable=true"
- "traefik.http.routers.nats-monitor.rule=Host(`nats.${DOMAIN:-local.lan}`)"
- "traefik.http.routers.nats-monitor.entrypoints=websecure"
- "traefik.http.routers.nats-monitor.tls=true"
- "traefik.http.routers.nats-monitor.middlewares=authentik-forwardauth@file"
- "traefik.http.services.nats-monitor.loadbalancer.server.port=8222"
# Monitoring & Observability
prometheus:
image: prom/prometheus:v2.48.1
container_name: prometheus
restart: unless-stopped
networks:
- backend
ports:
- "9090:9090"
volumes:
- prometheus_data:/prometheus
command:
- "--config.file=/etc/prometheus/prometheus.yml"
- "--storage.tsdb.path=/prometheus"
- "--web.console.libraries=/etc/prometheus/console_libraries"
- "--web.console.templates=/etc/prometheus/consoles"
- "--storage.tsdb.retention.time=30d"
- "--web.enable-lifecycle"
labels:
- "traefik.enable=true"
- "traefik.http.routers.prometheus.rule=Host(`prometheus.${DOMAIN:-local.lan}`)"
- "traefik.http.routers.prometheus.entrypoints=websecure"
- "traefik.http.routers.prometheus.tls=true"
- "traefik.http.routers.prometheus.middlewares=authentik-forwardauth@file"
- "traefik.http.services.prometheus.loadbalancer.server.port=9090"
grafana:
image: grafana/grafana:10.2.3
container_name: grafana
restart: unless-stopped
networks:
- backend
ports:
- "3000:3000"
volumes:
- grafana_data:/var/lib/grafana
- ./grafana/provisioning:/etc/grafana/provisioning:ro
- ./grafana/dashboards:/var/lib/grafana/dashboards:ro
environment:
GF_SECURITY_ADMIN_PASSWORD: ${GRAFANA_PASSWORD:-admin}
GF_USERS_ALLOW_SIGN_UP: false
GF_USERS_AUTO_ASSIGN_ORG: true
GF_USERS_AUTO_ASSIGN_ORG_ROLE: Viewer
GF_AUTH_GENERIC_OAUTH_ENABLED: true
GF_AUTH_GENERIC_OAUTH_NAME: Authentik
GF_AUTH_GENERIC_OAUTH_CLIENT_ID: ${GRAFANA_OAUTH_CLIENT_ID:-grafana}
GF_AUTH_GENERIC_OAUTH_CLIENT_SECRET: ${GRAFANA_OAUTH_CLIENT_SECRET:-changeme-grafana-secret}
GF_AUTH_GENERIC_OAUTH_SCOPES: openid profile email groups
GF_AUTH_GENERIC_OAUTH_AUTH_URL: https://auth.${DOMAIN:-local.lan}/application/o/authorize/
GF_AUTH_GENERIC_OAUTH_TOKEN_URL: https://auth.${DOMAIN:-local.lan}/application/o/token/
GF_AUTH_GENERIC_OAUTH_API_URL: https://auth.${DOMAIN:-local.lan}/application/o/userinfo/
GF_AUTH_GENERIC_OAUTH_AUTO_LOGIN: false
GF_AUTH_GENERIC_OAUTH_ALLOW_SIGN_UP: true
GF_AUTH_GENERIC_OAUTH_ROLE_ATTRIBUTE_PATH: role
GF_AUTH_GENERIC_OAUTH_ROLE_ATTRIBUTE_STRICT: false
GF_AUTH_GENERIC_OAUTH_GROUPS_ATTRIBUTE_PATH: groups
GF_AUTH_OAUTH_AUTO_LOGIN: false
GF_AUTH_DISABLE_LOGIN_FORM: false
# Cookie and security settings
GF_SERVER_ROOT_URL: https://grafana.${DOMAIN:-local.lan}
GF_SERVER_SERVE_FROM_SUB_PATH: false
GF_SECURITY_COOKIE_SECURE: false
GF_SECURITY_COOKIE_SAMESITE: lax
GF_AUTH_GENERIC_OAUTH_USE_PKCE: true
labels:
- "traefik.enable=true"
- "traefik.http.routers.grafana.rule=Host(`grafana.${DOMAIN:-local.lan}`)"
- "traefik.http.routers.grafana.entrypoints=websecure"
- "traefik.http.routers.grafana.tls=true"
- "traefik.http.services.grafana.loadbalancer.server.port=3000"
loki:
image: grafana/loki:2.9.4
container_name: loki
restart: unless-stopped
networks:
- backend
ports:
- "3100:3100"
volumes:
- loki_data:/loki
labels:
- "traefik.enable=true"
- "traefik.http.routers.loki.rule=Host(`loki.${DOMAIN:-local.lan}`)"
- "traefik.http.routers.loki.entrypoints=websecure"
- "traefik.http.routers.loki.tls=true"
- "traefik.http.routers.loki.middlewares=authentik-forwardauth@file"
- "traefik.http.services.loki.loadbalancer.server.port=3100"
# Feature Flags
unleash:
image: unleashorg/unleash-server:5.7.3
container_name: unleash
restart: unless-stopped
networks:
- frontend
- backend
ports:
- "4242:4242"
environment:
DATABASE_URL: postgres://postgres:${POSTGRES_PASSWORD:-postgres}@postgres:5432/unleash
DATABASE_SSL: false
LOG_LEVEL: info
depends_on:
- postgres
labels:
- "traefik.docker.network=ai-tax-agent-frontend"
- "traefik.enable=true"
- "traefik.http.routers.unleash.rule=Host(`unleash.${DOMAIN:-local.lan}`)"
- "traefik.http.routers.unleash.entrypoints=websecure"
- "traefik.http.routers.unleash.tls=true"
- "traefik.http.routers.unleash.middlewares=authentik-forwardauth@file"
- "traefik.http.services.unleash.loadbalancer.server.port=4242"
# Application Services
svc-ingestion:
build:
context: ../../
dockerfile: apps/svc_ingestion/Dockerfile
container_name: svc-ingestion
restart: unless-stopped
networks:
- backend
environment:
- VAULT_ADDR=http://vault:8200
- VAULT_TOKEN=${VAULT_DEV_ROOT_TOKEN_ID:-root}
- MINIO_ENDPOINT=minio:9092
- POSTGRES_URL=postgresql://postgres:${POSTGRES_PASSWORD:-postgres}@postgres:5432/tax_system
- REDIS_URL=redis://redis:6379
- EVENT_BUS_TYPE=${EVENT_BUS_TYPE:-memory}
- NATS_SERVERS=${NATS_SERVERS:-nats://nats:4222}
- NATS_STREAM_NAME=${NATS_STREAM_NAME:-TAX_AGENT_EVENTS}
- NATS_CONSUMER_GROUP=${NATS_CONSUMER_GROUP:-tax-agent}
depends_on:
- vault
- minio
- postgres
- redis
- nats
- neo4j
labels:
- "traefik.enable=true"
- "traefik.http.routers.svc-ingestion.rule=Host(`api.${DOMAIN:-local.lan}`) && PathPrefix(`/ingestion`)"
- "traefik.http.routers.svc-ingestion.entrypoints=websecure"
- "traefik.http.routers.svc-ingestion.tls=true"
- "traefik.http.routers.svc-ingestion.middlewares=authentik-forwardauth@file,rate-limit@file"
- "traefik.http.services.svc-ingestion.loadbalancer.server.port=8000"
svc-extract:
build:
context: ../../
dockerfile: apps/svc_extract/Dockerfile
container_name: svc-extract
restart: unless-stopped
networks:
- backend
environment:
- VAULT_ADDR=http://vault:8200
- VAULT_TOKEN=${VAULT_DEV_ROOT_TOKEN_ID:-root}
- MINIO_ENDPOINT=minio:9092
- POSTGRES_URL=postgresql://postgres:${POSTGRES_PASSWORD:-postgres}@postgres:5432/tax_system
- RAG_EMBEDDING_MODEL=${RAG_EMBEDDING_MODEL:-bge-small-en-v1.5}
- EVENT_BUS_TYPE=${EVENT_BUS_TYPE:-memory}
- NATS_SERVERS=${NATS_SERVERS:-nats://nats:4222}
- NATS_STREAM_NAME=${NATS_STREAM_NAME:-TAX_AGENT_EVENTS}
- NATS_CONSUMER_GROUP=${NATS_CONSUMER_GROUP:-tax-agent}
depends_on:
- vault
- minio
- postgres
- nats
- neo4j
- redis
labels:
- "traefik.enable=true"
- "traefik.http.routers.svc-extract.rule=Host(`api.${DOMAIN:-local.lan}`) && PathPrefix(`/extract`)"
- "traefik.http.routers.svc-extract.entrypoints=websecure"
- "traefik.http.routers.svc-extract.tls=true"
- "traefik.http.routers.svc-extract.middlewares=authentik-forwardauth@file,rate-limit@file"
- "traefik.http.services.svc-extract.loadbalancer.server.port=8000"
svc-kg:
build:
context: ../../
dockerfile: apps/svc_kg/Dockerfile
container_name: svc-kg
restart: unless-stopped
networks:
- backend
environment:
- VAULT_ADDR=http://vault:8200
- VAULT_TOKEN=${VAULT_DEV_ROOT_TOKEN_ID:-root}
- NEO4J_URI=bolt://neo4j:7687
- NEO4J_USER=neo4j
- NEO4J_PASSWORD=${NEO4J_PASSWORD:-neo4jpass}
- EVENT_BUS_TYPE=${EVENT_BUS_TYPE:-memory}
- NATS_SERVERS=${NATS_SERVERS:-nats://nats:4222}
- NATS_STREAM_NAME=${NATS_STREAM_NAME:-TAX_AGENT_EVENTS}
- NATS_CONSUMER_GROUP=${NATS_CONSUMER_GROUP:-tax-agent}
depends_on:
- vault
- neo4j
- nats
labels:
- "traefik.enable=true"
- "traefik.http.routers.svc-kg.rule=Host(`api.${DOMAIN:-local.lan}`) && PathPrefix(`/kg`)"
- "traefik.http.routers.svc-kg.entrypoints=websecure"
- "traefik.http.routers.svc-kg.tls=true"
- "traefik.http.routers.svc-kg.middlewares=authentik-forwardauth@file,rate-limit@file"
- "traefik.http.services.svc-kg.loadbalancer.server.port=8000"
svc-rag-retriever:
build:
context: ../../
dockerfile: apps/svc_rag_retriever/Dockerfile
container_name: svc-rag-retriever
restart: unless-stopped
networks:
- backend
environment:
- VAULT_ADDR=http://vault:8200
- VAULT_TOKEN=${VAULT_DEV_ROOT_TOKEN_ID:-root}
- QDRANT_URL=http://qdrant:6333
- NEO4J_URI=bolt://neo4j:7687
- NEO4J_USER=neo4j
- NEO4J_PASSWORD=${NEO4J_PASSWORD:-neo4jpass}
- RAG_EMBEDDING_MODEL=${RAG_EMBEDDING_MODEL:-bge-small-en-v1.5}
- RAG_RERANKER_MODEL=${RAG_RERANKER_MODEL:-cross-encoder/ms-marco-MiniLM-L-6-v2}
- EVENT_BUS_TYPE=${EVENT_BUS_TYPE:-memory}
- NATS_SERVERS=${NATS_SERVERS:-nats://nats:4222}
- NATS_STREAM_NAME=${NATS_STREAM_NAME:-TAX_AGENT_EVENTS}
- NATS_CONSUMER_GROUP=${NATS_CONSUMER_GROUP:-tax-agent}
depends_on:
- vault
- qdrant
- neo4j
- nats
labels:
- "traefik.enable=true"
- "traefik.http.routers.svc-rag-retriever.rule=Host(`api.${DOMAIN:-local.lan}`) && PathPrefix(`/rag`)"
- "traefik.http.routers.svc-rag-retriever.entrypoints=websecure"
- "traefik.http.routers.svc-rag-retriever.tls=true"
- "traefik.http.routers.svc-rag-retriever.middlewares=authentik-forwardauth@file,rate-limit@file"
- "traefik.http.services.svc-rag-retriever.loadbalancer.server.port=8000"
svc-coverage:
build:
context: ../../
dockerfile: apps/svc_coverage/Dockerfile
container_name: svc-coverage
restart: unless-stopped
networks:
- backend
volumes:
- ../../config:/app/config:ro
environment:
- VAULT_ADDR=http://vault:8200
- VAULT_TOKEN=${VAULT_DEV_ROOT_TOKEN_ID:-root}
- NEO4J_URI=bolt://neo4j:7687
- NEO4J_USER=neo4j
- NEO4J_PASSWORD=${NEO4J_PASSWORD:-neo4jpass}
- POSTGRES_URL=postgresql://postgres:${POSTGRES_PASSWORD:-postgres}@postgres:5432/tax_system
- RAG_SERVICE_URL=http://svc-rag-retriever:8000
- EVENT_BUS_TYPE=${EVENT_BUS_TYPE:-memory}
- NATS_SERVERS=${NATS_SERVERS:-nats://nats:4222}
- NATS_STREAM_NAME=${NATS_STREAM_NAME:-TAX_AGENT_EVENTS}
- NATS_CONSUMER_GROUP=${NATS_CONSUMER_GROUP:-tax-agent}
depends_on:
- vault
- neo4j
- postgres
- nats
labels:
- "traefik.enable=true"
- "traefik.http.routers.svc-coverage.rule=Host(`api.${DOMAIN:-local.lan}`) && PathPrefix(`/coverage`)"
- "traefik.http.routers.svc-coverage.entrypoints=websecure"
- "traefik.http.routers.svc-coverage.tls=true"
- "traefik.http.routers.svc-coverage.middlewares=authentik-forwardauth@file,rate-limit@file"
- "traefik.http.services.svc-coverage.loadbalancer.server.port=8000"
svc-firm-connectors:
build:
context: ../../
dockerfile: apps/svc_firm_connectors/Dockerfile
container_name: svc-firm-connectors
restart: unless-stopped
networks:
- backend
volumes:
- ../../config:/app/config:ro
environment:
- VAULT_ADDR=http://vault:8200
- VAULT_TOKEN=${VAULT_DEV_ROOT_TOKEN_ID:-root}
- POSTGRES_URL=postgresql://postgres:${POSTGRES_PASSWORD:-postgres}@postgres:5432/tax_system
- NEO4J_URL=bolt://neo4j:7687
- NEO4J_USER=neo4j
- NEO4J_PASSWORD=${NEO4J_PASSWORD:-password}
- REDIS_URL=redis://redis:6379
- MINIO_ENDPOINT=minio:9092
- MINIO_ACCESS_KEY=${MINIO_ACCESS_KEY:-minioadmin}
- MINIO_SECRET_KEY=${MINIO_SECRET_KEY:-minioadmin}
- QDRANT_URL=http://qdrant:6333
- EVENT_BUS_TYPE=${EVENT_BUS_TYPE:-memory}
- KAFKA_BOOTSTRAP_SERVERS=${KAFKA_BOOTSTRAP_SERVERS:-}
- NATS_SERVERS=${NATS_SERVERS:-nats://nats:4222}
- NATS_STREAM_NAME=${NATS_STREAM_NAME:-TAX_AGENT_EVENTS}
- NATS_CONSUMER_GROUP=${NATS_CONSUMER_GROUP:-tax-agent}
depends_on:
- postgres
- neo4j
- minio
- qdrant
- nats
- traefik
labels:
- "traefik.enable=true"
- "traefik.http.routers.svc-firm-connectors.rule=Host(`api.${DOMAIN:-local.lan}`) && PathPrefix(`/firm-connectors`)"
- "traefik.http.routers.svc-firm-connectors.entrypoints=websecure"
- "traefik.http.routers.svc-firm-connectors.tls=true"
- "traefik.http.routers.svc-firm-connectors.middlewares=authentik-forwardauth@file,rate-limit@file"
- "traefik.http.services.svc-firm-connectors.loadbalancer.server.port=8000"
svc-forms:
build:
context: ../../
dockerfile: apps/svc_forms/Dockerfile
container_name: svc-forms
restart: unless-stopped
networks:
- backend
volumes:
- ../../config:/app/config:ro
environment:
- VAULT_ADDR=http://vault:8200
- VAULT_TOKEN=${VAULT_DEV_ROOT_TOKEN_ID:-root}
- POSTGRES_URL=postgresql://postgres:${POSTGRES_PASSWORD:-postgres}@postgres:5432/tax_system
- NEO4J_URL=bolt://neo4j:7687
- NEO4J_USER=neo4j
- NEO4J_PASSWORD=${NEO4J_PASSWORD:-password}
- REDIS_URL=redis://redis:6379
- MINIO_ENDPOINT=minio:9092
- MINIO_ACCESS_KEY=${MINIO_ACCESS_KEY:-minioadmin}
- MINIO_SECRET_KEY=${MINIO_SECRET_KEY:-minioadmin}
- QDRANT_URL=http://qdrant:6333
- EVENT_BUS_TYPE=${EVENT_BUS_TYPE:-memory}
- KAFKA_BOOTSTRAP_SERVERS=${KAFKA_BOOTSTRAP_SERVERS:-}
- NATS_SERVERS=${NATS_SERVERS:-nats://nats:4222}
- NATS_STREAM_NAME=${NATS_STREAM_NAME:-TAX_AGENT_EVENTS}
- NATS_CONSUMER_GROUP=${NATS_CONSUMER_GROUP:-tax-agent}
depends_on:
- postgres
- neo4j
- minio
- qdrant
- nats
- traefik
labels:
- "traefik.enable=true"
- "traefik.http.routers.svc-forms.rule=Host(`api.${DOMAIN:-local.lan}`) && PathPrefix(`/forms`)"
- "traefik.http.routers.svc-forms.entrypoints=websecure"
- "traefik.http.routers.svc-forms.tls=true"
- "traefik.http.routers.svc-forms.middlewares=authentik-forwardauth@file,rate-limit@file"
- "traefik.http.services.svc-forms.loadbalancer.server.port=8000"
svc-hmrc:
build:
context: ../../
dockerfile: apps/svc_hmrc/Dockerfile
container_name: svc-hmrc
restart: unless-stopped
networks:
- backend
volumes:
- ../../config:/app/config:ro
environment:
- VAULT_ADDR=http://vault:8200
- VAULT_TOKEN=${VAULT_DEV_ROOT_TOKEN_ID:-root}
- POSTGRES_URL=postgresql://postgres:${POSTGRES_PASSWORD:-postgres}@postgres:5432/tax_system
- NEO4J_URL=bolt://neo4j:7687
- NEO4J_USER=neo4j
- NEO4J_PASSWORD=${NEO4J_PASSWORD:-password}
- REDIS_URL=redis://redis:6379
- MINIO_ENDPOINT=minio:9092
- MINIO_ACCESS_KEY=${MINIO_ACCESS_KEY:-minioadmin}
- MINIO_SECRET_KEY=${MINIO_SECRET_KEY:-minioadmin}
- QDRANT_URL=http://qdrant:6333
- EVENT_BUS_TYPE=${EVENT_BUS_TYPE:-memory}
- KAFKA_BOOTSTRAP_SERVERS=${KAFKA_BOOTSTRAP_SERVERS:-}
- NATS_SERVERS=${NATS_SERVERS:-nats://nats:4222}
- NATS_STREAM_NAME=${NATS_STREAM_NAME:-TAX_AGENT_EVENTS}
- NATS_CONSUMER_GROUP=${NATS_CONSUMER_GROUP:-tax-agent}
depends_on:
- postgres
- neo4j
- minio
- qdrant
- nats
- traefik
labels:
- "traefik.enable=true"
- "traefik.http.routers.svc-hmrc.rule=Host(`api.${DOMAIN:-local.lan}`) && PathPrefix(`/hmrc`)"
- "traefik.http.routers.svc-hmrc.entrypoints=websecure"
- "traefik.http.routers.svc-hmrc.tls=true"
- "traefik.http.routers.svc-hmrc.middlewares=authentik-forwardauth@file,rate-limit@file"
- "traefik.http.services.svc-hmrc.loadbalancer.server.port=8000"
svc-normalize-map:
build:
context: ../../
dockerfile: apps/svc_normalize_map/Dockerfile
container_name: svc-normalize-map
restart: unless-stopped
networks:
- backend
volumes:
- ../../config:/app/config:ro
environment:
- VAULT_ADDR=http://vault:8200
- VAULT_TOKEN=${VAULT_DEV_ROOT_TOKEN_ID:-root}
- POSTGRES_URL=postgresql://postgres:${POSTGRES_PASSWORD:-postgres}@postgres:5432/tax_system
- NEO4J_URL=bolt://neo4j:7687
- NEO4J_USER=neo4j
- NEO4J_PASSWORD=${NEO4J_PASSWORD:-password}
- REDIS_URL=redis://redis:6379
- MINIO_ENDPOINT=minio:9092
- MINIO_ACCESS_KEY=${MINIO_ACCESS_KEY:-minioadmin}
- MINIO_SECRET_KEY=${MINIO_SECRET_KEY:-minioadmin}
- QDRANT_URL=http://qdrant:6333
- EVENT_BUS_TYPE=${EVENT_BUS_TYPE:-memory}
- KAFKA_BOOTSTRAP_SERVERS=${KAFKA_BOOTSTRAP_SERVERS:-}
- NATS_SERVERS=${NATS_SERVERS:-nats://nats:4222}
- NATS_STREAM_NAME=${NATS_STREAM_NAME:-TAX_AGENT_EVENTS}
- NATS_CONSUMER_GROUP=${NATS_CONSUMER_GROUP:-tax-agent}
depends_on:
- postgres
- neo4j
- minio
- qdrant
- nats
- traefik
labels:
- "traefik.enable=true"
- "traefik.http.routers.svc-normalize-map.rule=Host(`api.${DOMAIN:-local.lan}`) && PathPrefix(`/normalize-map`)"
- "traefik.http.routers.svc-normalize-map.entrypoints=websecure"
- "traefik.http.routers.svc-normalize-map.tls=true"
- "traefik.http.routers.svc-normalize-map.middlewares=authentik-forwardauth@file,rate-limit@file"
- "traefik.http.services.svc-normalize-map.loadbalancer.server.port=8000"
svc-ocr:
build:
context: ../../
dockerfile: apps/svc_ocr/Dockerfile
container_name: svc-ocr
restart: unless-stopped
networks:
- backend
volumes:
- ../../config:/app/config:ro
environment:
- VAULT_ADDR=http://vault:8200
- VAULT_TOKEN=${VAULT_DEV_ROOT_TOKEN_ID:-root}
- POSTGRES_URL=postgresql://postgres:${POSTGRES_PASSWORD:-postgres}@postgres:5432/tax_system
- NEO4J_URL=bolt://neo4j:7687
- NEO4J_USER=neo4j
- NEO4J_PASSWORD=${NEO4J_PASSWORD:-password}
- REDIS_URL=redis://redis:6379
- MINIO_ENDPOINT=minio:9092
- MINIO_ACCESS_KEY=${MINIO_ACCESS_KEY:-minioadmin}
- MINIO_SECRET_KEY=${MINIO_SECRET_KEY:-minioadmin}
- QDRANT_URL=http://qdrant:6333
- EVENT_BUS_TYPE=${EVENT_BUS_TYPE:-memory}
- KAFKA_BOOTSTRAP_SERVERS=${KAFKA_BOOTSTRAP_SERVERS:-}
- NATS_SERVERS=${NATS_SERVERS:-nats://nats:4222}
- NATS_STREAM_NAME=${NATS_STREAM_NAME:-TAX_AGENT_EVENTS}
- NATS_CONSUMER_GROUP=${NATS_CONSUMER_GROUP:-tax-agent}
depends_on:
- postgres
- neo4j
- minio
- qdrant
- nats
- traefik
labels:
- "traefik.enable=true"
- "traefik.http.routers.svc-ocr.rule=Host(`api.${DOMAIN:-local.lan}`) && PathPrefix(`/ocr`)"
- "traefik.http.routers.svc-ocr.entrypoints=websecure"
- "traefik.http.routers.svc-ocr.tls=true"
- "traefik.http.routers.svc-ocr.middlewares=authentik-forwardauth@file,rate-limit@file"
- "traefik.http.services.svc-ocr.loadbalancer.server.port=8000"
svc-rag-indexer:
build:
context: ../../
dockerfile: apps/svc_rag_indexer/Dockerfile
container_name: svc-rag-indexer
restart: unless-stopped
networks:
- backend
volumes:
- ../../config:/app/config:ro
environment:
- VAULT_ADDR=http://vault:8200
- VAULT_TOKEN=${VAULT_DEV_ROOT_TOKEN_ID:-root}
- POSTGRES_URL=postgresql://postgres:${POSTGRES_PASSWORD:-postgres}@postgres:5432/tax_system
- NEO4J_URL=bolt://neo4j:7687
- NEO4J_USER=neo4j
- NEO4J_PASSWORD=${NEO4J_PASSWORD:-password}
- REDIS_URL=redis://redis:6379
- MINIO_ENDPOINT=minio:9092
- MINIO_ACCESS_KEY=${MINIO_ACCESS_KEY:-minioadmin}
- MINIO_SECRET_KEY=${MINIO_SECRET_KEY:-minioadmin}
- QDRANT_URL=http://qdrant:6333
- EVENT_BUS_TYPE=${EVENT_BUS_TYPE:-memory}
- KAFKA_BOOTSTRAP_SERVERS=${KAFKA_BOOTSTRAP_SERVERS:-}
- NATS_SERVERS=${NATS_SERVERS:-nats://nats:4222}
- NATS_STREAM_NAME=${NATS_STREAM_NAME:-TAX_AGENT_EVENTS}
- NATS_CONSUMER_GROUP=${NATS_CONSUMER_GROUP:-tax-agent}
depends_on:
- postgres
- neo4j
- minio
- qdrant
- nats
- traefik
labels:
- "traefik.enable=true"
- "traefik.http.routers.svc-rag-indexer.rule=Host(`api.${DOMAIN:-.lan}`) && PathPrefix(`/rag-indexer`)"
- "traefik.http.routers.svc-rag-indexer.entrypoints=websecure"
- "traefik.http.routers.svc-rag-indexer.tls=true"
- "traefik.http.routers.svc-rag-indexer.middlewares=authentik-forwardauth@file,rate-limit@file"
- "traefik.http.services.svc-rag-indexer.loadbalancer.server.port=8000"
svc-reason:
build:
context: ../../
dockerfile: apps/svc_reason/Dockerfile
container_name: svc-reason
restart: unless-stopped
networks:
- backend
volumes:
- ../../config:/app/config:ro
environment:
- VAULT_ADDR=http://vault:8200
- VAULT_TOKEN=${VAULT_DEV_ROOT_TOKEN_ID:-root}
- POSTGRES_URL=postgresql://postgres:${POSTGRES_PASSWORD:-postgres}@postgres:5432/tax_system
- NEO4J_URL=bolt://neo4j:7687
- NEO4J_USER=neo4j
- NEO4J_PASSWORD=${NEO4J_PASSWORD:-password}
- REDIS_URL=redis://redis:6379
- MINIO_ENDPOINT=minio:9092
- MINIO_ACCESS_KEY=${MINIO_ACCESS_KEY:-minioadmin}
- MINIO_SECRET_KEY=${MINIO_SECRET_KEY:-minioadmin}
- QDRANT_URL=http://qdrant:6333
- EVENT_BUS_TYPE=${EVENT_BUS_TYPE:-memory}
- KAFKA_BOOTSTRAP_SERVERS=${KAFKA_BOOTSTRAP_SERVERS:-}
- NATS_SERVERS=${NATS_SERVERS:-nats://nats:4222}
- NATS_STREAM_NAME=${NATS_STREAM_NAME:-TAX_AGENT_EVENTS}
- NATS_CONSUMER_GROUP=${NATS_CONSUMER_GROUP:-tax-agent}
depends_on:
- postgres
- neo4j
- minio
- qdrant
- nats
- traefik
labels:
- "traefik.enable=true"
- "traefik.http.routers.svc-reason.rule=Host(`api.${DOMAIN:-local.lan}`) && PathPrefix(`/reason`)"
- "traefik.http.routers.svc-reason.entrypoints=websecure"
- "traefik.http.routers.svc-reason.tls=true"
- "traefik.http.routers.svc-reason.middlewares=authentik-forwardauth@file,rate-limit@file"
- "traefik.http.services.svc-reason.loadbalancer.server.port=8000"
svc-rpa:
build:
context: ../../
dockerfile: apps/svc_rpa/Dockerfile
container_name: svc-rpa
restart: unless-stopped
networks:
- backend
volumes:
- ../../config:/app/config:ro
environment:
- VAULT_ADDR=http://vault:8200
- VAULT_TOKEN=${VAULT_DEV_ROOT_TOKEN_ID:-root}
- POSTGRES_URL=postgresql://postgres:${POSTGRES_PASSWORD:-postgres}@postgres:5432/tax_system
- NEO4J_URL=bolt://neo4j:7687
- NEO4J_USER=neo4j
- NEO4J_PASSWORD=${NEO4J_PASSWORD:-password}
- REDIS_URL=redis://redis:6379
- MINIO_ENDPOINT=minio:9092
- MINIO_ACCESS_KEY=${MINIO_ACCESS_KEY:-minioadmin}
- MINIO_SECRET_KEY=${MINIO_SECRET_KEY:-minioadmin}
- QDRANT_URL=http://qdrant:6333
- EVENT_BUS_TYPE=${EVENT_BUS_TYPE:-memory}
- KAFKA_BOOTSTRAP_SERVERS=${KAFKA_BOOTSTRAP_SERVERS:-}
- NATS_SERVERS=${NATS_SERVERS:-nats://nats:4222}
- NATS_STREAM_NAME=${NATS_STREAM_NAME:-TAX_AGENT_EVENTS}
- NATS_CONSUMER_GROUP=${NATS_CONSUMER_GROUP:-tax-agent}
depends_on:
- postgres
- neo4j
- minio
- qdrant
- nats
- traefik
labels:
- "traefik.enable=true"
- "traefik.http.routers.svc-rpa.rule=Host(`api.${DOMAIN:-local.lan}`) && PathPrefix(`/rpa`)"
- "traefik.http.routers.svc-rpa.entrypoints=websecure"
- "traefik.http.routers.svc-rpa.tls=true"
- "traefik.http.routers.svc-rpa.middlewares=authentik-forwardauth@file,rate-limit@file"
- "traefik.http.services.svc-rpa.loadbalancer.server.port=8000"
ui-review:
build:
context: ../../ui-review
dockerfile: Dockerfile
container_name: ui-review
restart: unless-stopped
networks:
- frontend
environment:
- NEXTAUTH_URL=https://review.${DOMAIN:-local.lan}
- NEXTAUTH_SECRET=${NEXTAUTH_SECRET:-changeme}
- API_BASE_URL=https://api.${DOMAIN:-local.lan}
depends_on:
- traefik
labels:
- "traefik.docker.network=ai-tax-agent-frontend"
- "traefik.enable=true"
- "traefik.http.routers.ui-review.rule=Host(`review.${DOMAIN:-local.lan}`)"
- "traefik.http.routers.ui-review.entrypoints=websecure"
- "traefik.http.routers.ui-review.tls=true"
- "traefik.http.routers.ui-review.middlewares=authentik-forwardauth@file"
- "traefik.http.services.ui-review.loadbalancer.server.port=3030"


@@ -27,9 +27,9 @@ volumes:
services: services:
# Edge Gateway & Load Balancer # Edge Gateway & Load Balancer
traefik: aia-traefik:
image: docker.io/library/traefik:v3.5.1 image: docker.io/library/traefik:v3.5.1
container_name: traefik container_name: aia-traefik
ports: ports:
- 80:80 - 80:80
- 443:443 - 443:443
@@ -49,9 +49,9 @@ services:
restart: unless-stopped restart: unless-stopped
# Identity & SSO # Identity & SSO
authentik-db: aia-authentik-db:
image: postgres:15-alpine image: postgres:15-alpine
container_name: authentik-db container_name: aia-authentik-db
restart: unless-stopped restart: unless-stopped
networks: networks:
- backend - backend
@@ -67,9 +67,9 @@ services:
timeout: 10s timeout: 10s
retries: 3 retries: 3
authentik-redis: aia-authentik-redis:
image: redis:7-alpine image: redis:7-alpine
container_name: authentik-redis container_name: aia-authentik-redis
restart: unless-stopped restart: unless-stopped
networks: networks:
- backend - backend
@@ -80,17 +80,17 @@ services:
timeout: 10s timeout: 10s
retries: 3 retries: 3
authentik-server: aia-authentik-server:
image: ghcr.io/goauthentik/server:2025.8.3 image: ghcr.io/goauthentik/server:2025.8.3
container_name: authentik-server container_name: aia-authentik-server
restart: unless-stopped restart: unless-stopped
networks: networks:
- backend - backend
- frontend - frontend
command: server command: server
environment: environment:
AUTHENTIK_REDIS__HOST: authentik-redis AUTHENTIK_REDIS__HOST: aia-authentik-redis
AUTHENTIK_POSTGRESQL__HOST: authentik-db AUTHENTIK_POSTGRESQL__HOST: aia-authentik-db
AUTHENTIK_POSTGRESQL__USER: authentik AUTHENTIK_POSTGRESQL__USER: authentik
AUTHENTIK_POSTGRESQL__NAME: authentik AUTHENTIK_POSTGRESQL__NAME: authentik
AUTHENTIK_POSTGRESQL__PASSWORD: ${AUTHENTIK_DB_PASSWORD:-authentik} AUTHENTIK_POSTGRESQL__PASSWORD: ${AUTHENTIK_DB_PASSWORD:-authentik}
@@ -105,8 +105,8 @@ services:
- ../authentik/custom-templates:/templates - ../authentik/custom-templates:/templates
- ../authentik/bootstrap.yaml:/blueprints/bootstrap.yaml - ../authentik/bootstrap.yaml:/blueprints/bootstrap.yaml
depends_on: depends_on:
- authentik-db - aia-authentik-db
- authentik-redis - aia-authentik-redis
labels: labels:
- "traefik.enable=true" - "traefik.enable=true"
- "traefik.http.routers.authentik.rule=Host(`auth.${DOMAIN:-local.lan}`)" - "traefik.http.routers.authentik.rule=Host(`auth.${DOMAIN:-local.lan}`)"
@@ -115,16 +115,16 @@ services:
- "traefik.docker.network=ai-tax-agent-frontend" - "traefik.docker.network=ai-tax-agent-frontend"
- "traefik.http.services.authentik.loadbalancer.server.port=9000" - "traefik.http.services.authentik.loadbalancer.server.port=9000"
authentik-worker: aia-authentik-worker:
image: ghcr.io/goauthentik/server:2025.8.3 image: ghcr.io/goauthentik/server:2025.8.3
container_name: authentik-worker container_name: aia-authentik-worker
restart: unless-stopped restart: unless-stopped
networks: networks:
- backend - backend
command: worker command: worker
environment: environment:
AUTHENTIK_REDIS__HOST: authentik-redis AUTHENTIK_REDIS__HOST: aia-authentik-redis
AUTHENTIK_POSTGRESQL__HOST: authentik-db AUTHENTIK_POSTGRESQL__HOST: aia-authentik-db
AUTHENTIK_POSTGRESQL__USER: authentik AUTHENTIK_POSTGRESQL__USER: authentik
AUTHENTIK_POSTGRESQL__NAME: authentik AUTHENTIK_POSTGRESQL__NAME: authentik
AUTHENTIK_POSTGRESQL__PASSWORD: ${AUTHENTIK_DB_PASSWORD:-authentik} AUTHENTIK_POSTGRESQL__PASSWORD: ${AUTHENTIK_DB_PASSWORD:-authentik}
@@ -134,30 +134,30 @@ services:
- ../authentik/media:/media - ../authentik/media:/media
- ../authentik/custom-templates:/templates - ../authentik/custom-templates:/templates
depends_on: depends_on:
- authentik-db - aia-authentik-db
- authentik-redis - aia-authentik-redis
authentik-outpost: aia-authentik-outpost:
image: ghcr.io/goauthentik/proxy:2025.8.3 image: ghcr.io/goauthentik/proxy:2025.8.3
container_name: authentik-outpost container_name: aia-authentik-outpost
restart: unless-stopped restart: unless-stopped
networks: networks:
- backend - backend
- frontend - frontend
environment: environment:
AUTHENTIK_HOST: http://authentik-server:9000 AUTHENTIK_HOST: http://aia-authentik-server:9000
AUTHENTIK_INSECURE: true AUTHENTIK_INSECURE: true
AUTHENTIK_TOKEN: ${AUTHENTIK_OUTPOST_TOKEN:-changeme} AUTHENTIK_TOKEN: ${AUTHENTIK_OUTPOST_TOKEN:-changeme}
AUTHENTIK_REDIS__HOST: authentik-redis AUTHENTIK_REDIS__HOST: aia-authentik-redis
AUTHENTIK_REDIS__PORT: 6379 AUTHENTIK_REDIS__PORT: 6379
depends_on: depends_on:
- authentik-server - aia-authentik-server
- authentik-redis - aia-authentik-redis
# Secrets Management # Secrets Management
vault: aia-vault:
image: hashicorp/vault:1.15 image: hashicorp/vault:1.15
container_name: vault container_name: aia-vault
restart: unless-stopped restart: unless-stopped
networks: networks:
- backend - backend
@@ -181,9 +181,9 @@ services:
- "traefik.http.services.vault.loadbalancer.server.port=8200" - "traefik.http.services.vault.loadbalancer.server.port=8200"
# Object Storage # Object Storage
minio: aia-minio:
image: minio/minio:RELEASE.2025-09-07T16-13-09Z image: minio/minio:RELEASE.2025-09-07T16-13-09Z
container_name: minio container_name: aia-minio
restart: unless-stopped restart: unless-stopped
networks: networks:
- backend - backend
@@ -218,9 +218,9 @@ services:
- "traefik.http.services.minio-console.loadbalancer.server.port=9093" - "traefik.http.services.minio-console.loadbalancer.server.port=9093"
# Vector Database # Vector Database
qdrant: aia-qdrant:
image: qdrant/qdrant:v1.7.4 image: qdrant/qdrant:v1.7.4
container_name: qdrant container_name: aia-qdrant
restart: unless-stopped restart: unless-stopped
networks: networks:
- backend - backend
@@ -242,9 +242,9 @@ services:
- "traefik.http.services.qdrant.loadbalancer.server.port=6333" - "traefik.http.services.qdrant.loadbalancer.server.port=6333"
# Knowledge Graph Database # Knowledge Graph Database
neo4j: aia-neo4j:
image: neo4j:5.15-community image: neo4j:5.15-community
container_name: neo4j container_name: aia-neo4j
restart: unless-stopped restart: unless-stopped
networks: networks:
- backend - backend
@@ -257,7 +257,7 @@ services:
- ../neo4j/plugins:/plugins - ../neo4j/plugins:/plugins
environment: environment:
NEO4J_AUTH: neo4j/${NEO4J_PASSWORD:-neo4jpass} NEO4J_AUTH: neo4j/${NEO4J_PASSWORD:-neo4jpass}
NEO4J_PLUGINS: '["apoc", "graph-data-science"]' NEO4J_PLUGINS: '["apoc", "graph-daia-science"]'
NEO4J_dbms_security_procedures_unrestricted: gds.*,apoc.* NEO4J_dbms_security_procedures_unrestricted: gds.*,apoc.*
NEO4J_dbms_security_procedures_allowlist: gds.*,apoc.* NEO4J_dbms_security_procedures_allowlist: gds.*,apoc.*
NEO4J_apoc_export_file_enabled: true NEO4J_apoc_export_file_enabled: true
@@ -272,9 +272,9 @@ services:
- "traefik.http.services.neo4j.loadbalancer.server.port=7474" - "traefik.http.services.neo4j.loadbalancer.server.port=7474"
# Secure Client Data Store # Secure Client Data Store
postgres: aia-postgres:
image: postgres:15-alpine image: postgres:15-alpine
container_name: postgres container_name: aia-postgres
restart: unless-stopped restart: unless-stopped
networks: networks:
- backend - backend
@@ -308,9 +308,9 @@ services:
retries: 3 retries: 3
# Cache & Session Store # Cache & Session Store
redis: aia-redis:
image: redis:7-alpine image: redis:7-alpine
container_name: redis container_name: aia-redis
restart: unless-stopped restart: unless-stopped
networks: networks:
- backend - backend
@@ -331,9 +331,9 @@ services:
retries: 3 retries: 3
# Message Broker & Event Streaming # Message Broker & Event Streaming
nats: aia-nats:
image: nats:2.10-alpine image: nats:2.10-alpine
container_name: nats container_name: aia-nats
restart: unless-stopped restart: unless-stopped
networks: networks:
- backend - backend
@@ -371,9 +371,9 @@ services:
- "traefik.http.services.nats-monitor.loadbalancer.server.port=8222" - "traefik.http.services.nats-monitor.loadbalancer.server.port=8222"
# Monitoring & Observability # Monitoring & Observability
prometheus: aia-prometheus:
image: prom/prometheus:v2.48.1 image: prom/prometheus:v2.48.1
container_name: prometheus container_name: aia-prometheus
restart: unless-stopped restart: unless-stopped
networks: networks:
- backend - backend
@@ -396,9 +396,9 @@ services:
- "traefik.http.routers.prometheus.middlewares=authentik-forwardauth@file" - "traefik.http.routers.prometheus.middlewares=authentik-forwardauth@file"
- "traefik.http.services.prometheus.loadbalancer.server.port=9090" - "traefik.http.services.prometheus.loadbalancer.server.port=9090"
grafana: aia-grafana:
image: grafana/grafana:10.2.3 image: grafana/grafana:10.2.3
container_name: grafana container_name: aia-grafana
restart: unless-stopped restart: unless-stopped
networks: networks:
- backend - backend
@@ -441,9 +441,9 @@ services:
- "traefik.http.routers.grafana.tls=true" - "traefik.http.routers.grafana.tls=true"
- "traefik.http.services.grafana.loadbalancer.server.port=3000" - "traefik.http.services.grafana.loadbalancer.server.port=3000"
loki: aia-loki:
image: grafana/loki:2.9.4 image: grafana/loki:2.9.4
container_name: loki container_name: aia-loki
restart: unless-stopped restart: unless-stopped
networks: networks:
- backend - backend
@@ -460,9 +460,9 @@ services:
- "traefik.http.services.loki.loadbalancer.server.port=3100" - "traefik.http.services.loki.loadbalancer.server.port=3100"
# Feature Flags # Feature Flags
unleash: aia-unleash:
image: unleashorg/unleash-server:5.7.3 image: unleashorg/unleash-server:5.7.3
container_name: unleash container_name: aia-unleash
restart: unless-stopped restart: unless-stopped
networks: networks:
- frontend - frontend
@@ -470,11 +470,11 @@ services:
ports: ports:
- "4242:4242" - "4242:4242"
environment: environment:
DATABASE_URL: postgres://postgres:${POSTGRES_PASSWORD:-postgres}@postgres:5432/unleash DATABASE_URL: postgres://postgres:${POSTGRES_PASSWORD:-postgres}@aia-postgres:5432/unleash
DATABASE_SSL: false DATABASE_SSL: false
LOG_LEVEL: info LOG_LEVEL: info
depends_on: depends_on:
- postgres - aia-postgres
labels: labels:
- "traefik.docker.network=ai-tax-agent-frontend" - "traefik.docker.network=ai-tax-agent-frontend"
- "traefik.enable=true" - "traefik.enable=true"
@@ -485,31 +485,31 @@ services:
- "traefik.http.services.unleash.loadbalancer.server.port=4242" - "traefik.http.services.unleash.loadbalancer.server.port=4242"
# Application Services # Application Services
svc-ingestion: aia-svc-ingestion:
build: build:
context: ../../ context: ../../
dockerfile: apps/svc_ingestion/Dockerfile dockerfile: apps/svc_ingestion/Dockerfile
container_name: svc-ingestion container_name: aia-svc-ingestion
restart: unless-stopped restart: unless-stopped
networks: networks:
- backend - backend
environment: environment:
- VAULT_ADDR=http://vault:8200 - VAULT_ADDR=http://aia-vault:8200
- VAULT_TOKEN=${VAULT_DEV_ROOT_TOKEN_ID:-root} - VAULT_TOKEN=${VAULT_DEV_ROOT_TOKEN_ID:-root}
- MINIO_ENDPOINT=minio:9092 - MINIO_ENDPOINT=aia-minio:9092
- POSTGRES_URL=postgresql://postgres:${POSTGRES_PASSWORD:-postgres}@postgres:5432/tax_system - POSTGRES_URL=postgresql://postgres:${POSTGRES_PASSWORD:-postgres}@aia-postgres:5432/tax_system
- REDIS_URL=redis://redis:6379 - REDIS_URL=redis://aia-redis:6379
- EVENT_BUS_TYPE=${EVENT_BUS_TYPE:-memory} - EVENT_BUS_TYPE=${EVENT_BUS_TYPE:-memory}
- NATS_SERVERS=${NATS_SERVERS:-nats://nats:4222} - NATS_SERVERS=${NATS_SERVERS:-nats://aia-nats:4222}
- NATS_STREAM_NAME=${NATS_STREAM_NAME:-TAX_AGENT_EVENTS} - NATS_STREAM_NAME=${NATS_STREAM_NAME:-TAX_AGENT_EVENTS}
- NATS_CONSUMER_GROUP=${NATS_CONSUMER_GROUP:-tax-agent} - NATS_CONSUMER_GROUP=${NATS_CONSUMER_GROUP:-tax-agent}
depends_on: depends_on:
- vault - aia-vault
- minio - aia-minio
- postgres - aia-postgres
- redis - aia-redis
- nats - aia-nats
- neo4j - aia-neo4j
labels: labels:
- "traefik.enable=true" - "traefik.enable=true"
- "traefik.http.routers.svc-ingestion.rule=Host(`api.${DOMAIN:-local.lan}`) && PathPrefix(`/ingestion`)" - "traefik.http.routers.svc-ingestion.rule=Host(`api.${DOMAIN:-local.lan}`) && PathPrefix(`/ingestion`)"
@@ -518,31 +518,31 @@ services:
- "traefik.http.routers.svc-ingestion.middlewares=authentik-forwardauth@file,rate-limit@file" - "traefik.http.routers.svc-ingestion.middlewares=authentik-forwardauth@file,rate-limit@file"
- "traefik.http.services.svc-ingestion.loadbalancer.server.port=8000" - "traefik.http.services.svc-ingestion.loadbalancer.server.port=8000"
svc-extract: aia-svc-extract:
build: build:
context: ../../ context: ../../
dockerfile: apps/svc_extract/Dockerfile dockerfile: apps/svc_extract/Dockerfile
container_name: svc-extract container_name: aia-svc-extract
restart: unless-stopped restart: unless-stopped
networks: networks:
- backend - backend
environment: environment:
- VAULT_ADDR=http://vault:8200 - VAULT_ADDR=http://aia-vault:8200
- VAULT_TOKEN=${VAULT_DEV_ROOT_TOKEN_ID:-root} - VAULT_TOKEN=${VAULT_DEV_ROOT_TOKEN_ID:-root}
- MINIO_ENDPOINT=minio:9092 - MINIO_ENDPOINT=aia-minio:9092
- POSTGRES_URL=postgresql://postgres:${POSTGRES_PASSWORD:-postgres}@postgres:5432/tax_system - POSTGRES_URL=postgresql://postgres:${POSTGRES_PASSWORD:-postgres}@aia-postgres:5432/tax_system
- RAG_EMBEDDING_MODEL=${RAG_EMBEDDING_MODEL:-bge-small-en-v1.5} - RAG_EMBEDDING_MODEL=${RAG_EMBEDDING_MODEL:-bge-small-en-v1.5}
- EVENT_BUS_TYPE=${EVENT_BUS_TYPE:-memory} - EVENT_BUS_TYPE=${EVENT_BUS_TYPE:-memory}
- NATS_SERVERS=${NATS_SERVERS:-nats://nats:4222} - NATS_SERVERS=${NATS_SERVERS:-nats://aia-nats:4222}
- NATS_STREAM_NAME=${NATS_STREAM_NAME:-TAX_AGENT_EVENTS} - NATS_STREAM_NAME=${NATS_STREAM_NAME:-TAX_AGENT_EVENTS}
- NATS_CONSUMER_GROUP=${NATS_CONSUMER_GROUP:-tax-agent} - NATS_CONSUMER_GROUP=${NATS_CONSUMER_GROUP:-tax-agent}
depends_on: depends_on:
- vault - aia-vault
- minio - aia-minio
- postgres - aia-postgres
- nats - aia-nats
- neo4j - aia-neo4j
- redis - aia-redis
labels: labels:
- "traefik.enable=true" - "traefik.enable=true"
- "traefik.http.routers.svc-extract.rule=Host(`api.${DOMAIN:-local.lan}`) && PathPrefix(`/extract`)" - "traefik.http.routers.svc-extract.rule=Host(`api.${DOMAIN:-local.lan}`) && PathPrefix(`/extract`)"
@@ -551,28 +551,28 @@ services:
- "traefik.http.routers.svc-extract.middlewares=authentik-forwardauth@file,rate-limit@file" - "traefik.http.routers.svc-extract.middlewares=authentik-forwardauth@file,rate-limit@file"
- "traefik.http.services.svc-extract.loadbalancer.server.port=8000" - "traefik.http.services.svc-extract.loadbalancer.server.port=8000"
svc-kg: aia-svc-kg:
build: build:
context: ../../ context: ../../
dockerfile: apps/svc_kg/Dockerfile dockerfile: apps/svc_kg/Dockerfile
container_name: svc-kg container_name: aia-svc-kg
restart: unless-stopped restart: unless-stopped
networks: networks:
- backend - backend
environment: environment:
- VAULT_ADDR=http://vault:8200 - VAULT_ADDR=http://aia-vault:8200
- VAULT_TOKEN=${VAULT_DEV_ROOT_TOKEN_ID:-root} - VAULT_TOKEN=${VAULT_DEV_ROOT_TOKEN_ID:-root}
- NEO4J_URI=bolt://neo4j:7687 - NEO4J_URI=bolt://aia-neo4j:7687
- NEO4J_USER=neo4j - NEO4J_USER=neo4j
- NEO4J_PASSWORD=${NEO4J_PASSWORD:-neo4jpass} - NEO4J_PASSWORD=${NEO4J_PASSWORD:-neo4jpass}
- EVENT_BUS_TYPE=${EVENT_BUS_TYPE:-memory} - EVENT_BUS_TYPE=${EVENT_BUS_TYPE:-memory}
- NATS_SERVERS=${NATS_SERVERS:-nats://nats:4222} - NATS_SERVERS=${NATS_SERVERS:-nats://aia-nats:4222}
- NATS_STREAM_NAME=${NATS_STREAM_NAME:-TAX_AGENT_EVENTS} - NATS_STREAM_NAME=${NATS_STREAM_NAME:-TAX_AGENT_EVENTS}
- NATS_CONSUMER_GROUP=${NATS_CONSUMER_GROUP:-tax-agent} - NATS_CONSUMER_GROUP=${NATS_CONSUMER_GROUP:-tax-agent}
depends_on: depends_on:
- vault - aia-vault
- neo4j - aia-neo4j
- nats - aia-nats
labels: labels:
- "traefik.enable=true" - "traefik.enable=true"
- "traefik.http.routers.svc-kg.rule=Host(`api.${DOMAIN:-local.lan}`) && PathPrefix(`/kg`)" - "traefik.http.routers.svc-kg.rule=Host(`api.${DOMAIN:-local.lan}`) && PathPrefix(`/kg`)"
@@ -581,32 +581,32 @@ services:
- "traefik.http.routers.svc-kg.middlewares=authentik-forwardauth@file,rate-limit@file" - "traefik.http.routers.svc-kg.middlewares=authentik-forwardauth@file,rate-limit@file"
- "traefik.http.services.svc-kg.loadbalancer.server.port=8000" - "traefik.http.services.svc-kg.loadbalancer.server.port=8000"
svc-rag-retriever: aia-svc-rag-retriever:
build: build:
context: ../../ context: ../../
dockerfile: apps/svc_rag_retriever/Dockerfile dockerfile: apps/svc_rag_retriever/Dockerfile
container_name: svc-rag-retriever container_name: aia-svc-rag-retriever
restart: unless-stopped restart: unless-stopped
networks: networks:
- backend - backend
environment: environment:
- VAULT_ADDR=http://vault:8200 - VAULT_ADDR=http://aia-vault:8200
- VAULT_TOKEN=${VAULT_DEV_ROOT_TOKEN_ID:-root} - VAULT_TOKEN=${VAULT_DEV_ROOT_TOKEN_ID:-root}
- QDRANT_URL=http://qdrant:6333 - QDRANT_URL=http://aia-qdrant:6333
- NEO4J_URI=bolt://neo4j:7687 - NEO4J_URI=bolt://aia-neo4j:7687
- NEO4J_USER=neo4j - NEO4J_USER=neo4j
- NEO4J_PASSWORD=${NEO4J_PASSWORD:-neo4jpass} - NEO4J_PASSWORD=${NEO4J_PASSWORD:-neo4jpass}
- RAG_EMBEDDING_MODEL=${RAG_EMBEDDING_MODEL:-bge-small-en-v1.5} - RAG_EMBEDDING_MODEL=${RAG_EMBEDDING_MODEL:-bge-small-en-v1.5}
- RAG_RERANKER_MODEL=${RAG_RERANKER_MODEL:-cross-encoder/ms-marco-MiniLM-L-6-v2} - RAG_RERANKER_MODEL=${RAG_RERANKER_MODEL:-cross-encoder/ms-marco-MiniLM-L-6-v2}
- EVENT_BUS_TYPE=${EVENT_BUS_TYPE:-memory} - EVENT_BUS_TYPE=${EVENT_BUS_TYPE:-memory}
- NATS_SERVERS=${NATS_SERVERS:-nats://nats:4222} - NATS_SERVERS=${NATS_SERVERS:-nats://aia-nats:4222}
- NATS_STREAM_NAME=${NATS_STREAM_NAME:-TAX_AGENT_EVENTS} - NATS_STREAM_NAME=${NATS_STREAM_NAME:-TAX_AGENT_EVENTS}
- NATS_CONSUMER_GROUP=${NATS_CONSUMER_GROUP:-tax-agent} - NATS_CONSUMER_GROUP=${NATS_CONSUMER_GROUP:-tax-agent}
depends_on: depends_on:
- vault - aia-vault
- qdrant - aia-qdrant
- neo4j - aia-neo4j
- nats - aia-nats
labels: labels:
- "traefik.enable=true" - "traefik.enable=true"
- "traefik.http.routers.svc-rag-retriever.rule=Host(`api.${DOMAIN:-local.lan}`) && PathPrefix(`/rag`)" - "traefik.http.routers.svc-rag-retriever.rule=Host(`api.${DOMAIN:-local.lan}`) && PathPrefix(`/rag`)"
@@ -615,33 +615,33 @@ services:
- "traefik.http.routers.svc-rag-retriever.middlewares=authentik-forwardauth@file,rate-limit@file" - "traefik.http.routers.svc-rag-retriever.middlewares=authentik-forwardauth@file,rate-limit@file"
- "traefik.http.services.svc-rag-retriever.loadbalancer.server.port=8000" - "traefik.http.services.svc-rag-retriever.loadbalancer.server.port=8000"
svc-coverage: aia-svc-coverage:
build: build:
context: ../../ context: ../../
dockerfile: apps/svc_coverage/Dockerfile dockerfile: apps/svc_coverage/Dockerfile
container_name: svc-coverage container_name: aia-svc-coverage
restart: unless-stopped restart: unless-stopped
networks: networks:
- backend - backend
volumes: volumes:
- ../../config:/app/config:ro - ../../config:/app/config:ro
environment: environment:
- VAULT_ADDR=http://vault:8200 - VAULT_ADDR=http://aia-vault:8200
- VAULT_TOKEN=${VAULT_DEV_ROOT_TOKEN_ID:-root} - VAULT_TOKEN=${VAULT_DEV_ROOT_TOKEN_ID:-root}
- NEO4J_URI=bolt://neo4j:7687 - NEO4J_URI=bolt://aia-neo4j:7687
- NEO4J_USER=neo4j - NEO4J_USER=neo4j
- NEO4J_PASSWORD=${NEO4J_PASSWORD:-neo4jpass} - NEO4J_PASSWORD=${NEO4J_PASSWORD:-neo4jpass}
- POSTGRES_URL=postgresql://postgres:${POSTGRES_PASSWORD:-postgres}@postgres:5432/tax_system - POSTGRES_URL=postgresql://postgres:${POSTGRES_PASSWORD:-postgres}@aia-postgres:5432/tax_system
- RAG_SERVICE_URL=http://svc-rag-retriever:8000 - RAG_SERVICE_URL=http://aia-svc-rag-retriever:8000
- EVENT_BUS_TYPE=${EVENT_BUS_TYPE:-memory} - EVENT_BUS_TYPE=${EVENT_BUS_TYPE:-memory}
- NATS_SERVERS=${NATS_SERVERS:-nats://nats:4222} - NATS_SERVERS=${NATS_SERVERS:-nats://aia-nats:4222}
- NATS_STREAM_NAME=${NATS_STREAM_NAME:-TAX_AGENT_EVENTS} - NATS_STREAM_NAME=${NATS_STREAM_NAME:-TAX_AGENT_EVENTS}
- NATS_CONSUMER_GROUP=${NATS_CONSUMER_GROUP:-tax-agent} - NATS_CONSUMER_GROUP=${NATS_CONSUMER_GROUP:-tax-agent}
depends_on: depends_on:
- vault - aia-vault
- neo4j - aia-neo4j
- postgres - aia-postgres
- nats - aia-nats
labels: labels:
- "traefik.enable=true" - "traefik.enable=true"
- "traefik.http.routers.svc-coverage.rule=Host(`api.${DOMAIN:-local.lan}`) && PathPrefix(`/coverage`)" - "traefik.http.routers.svc-coverage.rule=Host(`api.${DOMAIN:-local.lan}`) && PathPrefix(`/coverage`)"
@@ -650,40 +650,40 @@ services:
- "traefik.http.routers.svc-coverage.middlewares=authentik-forwardauth@file,rate-limit@file" - "traefik.http.routers.svc-coverage.middlewares=authentik-forwardauth@file,rate-limit@file"
- "traefik.http.services.svc-coverage.loadbalancer.server.port=8000" - "traefik.http.services.svc-coverage.loadbalancer.server.port=8000"
svc-firm-connectors: aia-svc-firm-connectors:
build: build:
context: ../../ context: ../../
dockerfile: apps/svc_firm_connectors/Dockerfile dockerfile: apps/svc_firm_connectors/Dockerfile
container_name: svc-firm-connectors container_name: aia-svc-firm-connectors
restart: unless-stopped restart: unless-stopped
networks: networks:
- backend - backend
volumes: volumes:
- ../../config:/app/config:ro - ../../config:/app/config:ro
environment: environment:
- VAULT_ADDR=http://vault:8200 - VAULT_ADDR=http://aia-vault:8200
- VAULT_TOKEN=${VAULT_DEV_ROOT_TOKEN_ID:-root} - VAULT_TOKEN=${VAULT_DEV_ROOT_TOKEN_ID:-root}
- POSTGRES_URL=postgresql://postgres:${POSTGRES_PASSWORD:-postgres}@postgres:5432/tax_system - POSTGRES_URL=postgresql://postgres:${POSTGRES_PASSWORD:-postgres}@aia-postgres:5432/tax_system
- NEO4J_URL=bolt://neo4j:7687 - NEO4J_URL=bolt://aia-neo4j:7687
- NEO4J_USER=neo4j - NEO4J_USER=neo4j
- NEO4J_PASSWORD=${NEO4J_PASSWORD:-password} - NEO4J_PASSWORD=${NEO4J_PASSWORD:-password}
- REDIS_URL=redis://redis:6379 - REDIS_URL=redis://aia-redis:6379
- MINIO_ENDPOINT=minio:9092 - MINIO_ENDPOINT=aia-minio:9092
- MINIO_ACCESS_KEY=${MINIO_ACCESS_KEY:-minioadmin} - MINIO_ACCESS_KEY=${MINIO_ACCESS_KEY:-minioadmin}
- MINIO_SECRET_KEY=${MINIO_SECRET_KEY:-minioadmin} - MINIO_SECRET_KEY=${MINIO_SECRET_KEY:-minioadmin}
- QDRANT_URL=http://qdrant:6333 - QDRANT_URL=http://aia-qdrant:6333
- EVENT_BUS_TYPE=${EVENT_BUS_TYPE:-memory} - EVENT_BUS_TYPE=${EVENT_BUS_TYPE:-memory}
- KAFKA_BOOTSTRAP_SERVERS=${KAFKA_BOOTSTRAP_SERVERS:-} - KAFKA_BOOTSTRAP_SERVERS=${KAFKA_BOOTSTRAP_SERVERS:-}
- NATS_SERVERS=${NATS_SERVERS:-nats://nats:4222} - NATS_SERVERS=${NATS_SERVERS:-nats://aia-nats:4222}
- NATS_STREAM_NAME=${NATS_STREAM_NAME:-TAX_AGENT_EVENTS} - NATS_STREAM_NAME=${NATS_STREAM_NAME:-TAX_AGENT_EVENTS}
- NATS_CONSUMER_GROUP=${NATS_CONSUMER_GROUP:-tax-agent} - NATS_CONSUMER_GROUP=${NATS_CONSUMER_GROUP:-tax-agent}
depends_on: depends_on:
- postgres - aia-postgres
- neo4j - aia-neo4j
- minio - aia-minio
- qdrant - aia-qdrant
- nats - aia-nats
- traefik - aia-traefik
labels: labels:
- "traefik.enable=true" - "traefik.enable=true"
- "traefik.http.routers.svc-firm-connectors.rule=Host(`api.${DOMAIN:-local.lan}`) && PathPrefix(`/firm-connectors`)" - "traefik.http.routers.svc-firm-connectors.rule=Host(`api.${DOMAIN:-local.lan}`) && PathPrefix(`/firm-connectors`)"
@@ -692,40 +692,40 @@ services:
- "traefik.http.routers.svc-firm-connectors.middlewares=authentik-forwardauth@file,rate-limit@file" - "traefik.http.routers.svc-firm-connectors.middlewares=authentik-forwardauth@file,rate-limit@file"
- "traefik.http.services.svc-firm-connectors.loadbalancer.server.port=8000" - "traefik.http.services.svc-firm-connectors.loadbalancer.server.port=8000"
svc-forms: aia-svc-forms:
build: build:
context: ../../ context: ../../
dockerfile: apps/svc_forms/Dockerfile dockerfile: apps/svc_forms/Dockerfile
container_name: svc-forms container_name: aia-svc-forms
restart: unless-stopped restart: unless-stopped
networks: networks:
- backend - backend
volumes: volumes:
- ../../config:/app/config:ro - ../../config:/app/config:ro
environment: environment:
- VAULT_ADDR=http://vault:8200 - VAULT_ADDR=http://aia-vault:8200
- VAULT_TOKEN=${VAULT_DEV_ROOT_TOKEN_ID:-root} - VAULT_TOKEN=${VAULT_DEV_ROOT_TOKEN_ID:-root}
- POSTGRES_URL=postgresql://postgres:${POSTGRES_PASSWORD:-postgres}@postgres:5432/tax_system - POSTGRES_URL=postgresql://postgres:${POSTGRES_PASSWORD:-postgres}@aia-postgres:5432/tax_system
- NEO4J_URL=bolt://neo4j:7687 - NEO4J_URL=bolt://aia-neo4j:7687
- NEO4J_USER=neo4j - NEO4J_USER=neo4j
- NEO4J_PASSWORD=${NEO4J_PASSWORD:-password} - NEO4J_PASSWORD=${NEO4J_PASSWORD:-password}
- REDIS_URL=redis://redis:6379 - REDIS_URL=redis://aia-redis:6379
- MINIO_ENDPOINT=minio:9092 - MINIO_ENDPOINT=aia-minio:9092
- MINIO_ACCESS_KEY=${MINIO_ACCESS_KEY:-minioadmin} - MINIO_ACCESS_KEY=${MINIO_ACCESS_KEY:-minioadmin}
- MINIO_SECRET_KEY=${MINIO_SECRET_KEY:-minioadmin} - MINIO_SECRET_KEY=${MINIO_SECRET_KEY:-minioadmin}
- QDRANT_URL=http://qdrant:6333 - QDRANT_URL=http://aia-qdrant:6333
- EVENT_BUS_TYPE=${EVENT_BUS_TYPE:-memory} - EVENT_BUS_TYPE=${EVENT_BUS_TYPE:-memory}
- KAFKA_BOOTSTRAP_SERVERS=${KAFKA_BOOTSTRAP_SERVERS:-} - KAFKA_BOOTSTRAP_SERVERS=${KAFKA_BOOTSTRAP_SERVERS:-}
- NATS_SERVERS=${NATS_SERVERS:-nats://nats:4222} - NATS_SERVERS=${NATS_SERVERS:-nats://aia-nats:4222}
- NATS_STREAM_NAME=${NATS_STREAM_NAME:-TAX_AGENT_EVENTS} - NATS_STREAM_NAME=${NATS_STREAM_NAME:-TAX_AGENT_EVENTS}
- NATS_CONSUMER_GROUP=${NATS_CONSUMER_GROUP:-tax-agent} - NATS_CONSUMER_GROUP=${NATS_CONSUMER_GROUP:-tax-agent}
depends_on: depends_on:
- postgres - aia-postgres
- neo4j - aia-neo4j
- minio - aia-minio
- qdrant - aia-qdrant
- nats - aia-nats
- traefik - aia-traefik
labels: labels:
- "traefik.enable=true" - "traefik.enable=true"
- "traefik.http.routers.svc-forms.rule=Host(`api.${DOMAIN:-local.lan}`) && PathPrefix(`/forms`)" - "traefik.http.routers.svc-forms.rule=Host(`api.${DOMAIN:-local.lan}`) && PathPrefix(`/forms`)"
@@ -734,40 +734,40 @@ services:
- "traefik.http.routers.svc-forms.middlewares=authentik-forwardauth@file,rate-limit@file" - "traefik.http.routers.svc-forms.middlewares=authentik-forwardauth@file,rate-limit@file"
- "traefik.http.services.svc-forms.loadbalancer.server.port=8000" - "traefik.http.services.svc-forms.loadbalancer.server.port=8000"
svc-hmrc: aia-svc-hmrc:
build: build:
context: ../../ context: ../../
dockerfile: apps/svc_hmrc/Dockerfile dockerfile: apps/svc_hmrc/Dockerfile
container_name: svc-hmrc container_name: aia-svc-hmrc
restart: unless-stopped restart: unless-stopped
networks: networks:
- backend - backend
volumes: volumes:
- ../../config:/app/config:ro - ../../config:/app/config:ro
environment: environment:
- VAULT_ADDR=http://vault:8200 - VAULT_ADDR=http://aia-vault:8200
- VAULT_TOKEN=${VAULT_DEV_ROOT_TOKEN_ID:-root} - VAULT_TOKEN=${VAULT_DEV_ROOT_TOKEN_ID:-root}
- POSTGRES_URL=postgresql://postgres:${POSTGRES_PASSWORD:-postgres}@postgres:5432/tax_system - POSTGRES_URL=postgresql://postgres:${POSTGRES_PASSWORD:-postgres}@aia-postgres:5432/tax_system
- NEO4J_URL=bolt://neo4j:7687 - NEO4J_URL=bolt://aia-neo4j:7687
- NEO4J_USER=neo4j - NEO4J_USER=neo4j
- NEO4J_PASSWORD=${NEO4J_PASSWORD:-password} - NEO4J_PASSWORD=${NEO4J_PASSWORD:-password}
- REDIS_URL=redis://redis:6379 - REDIS_URL=redis://aia-redis:6379
- MINIO_ENDPOINT=minio:9092 - MINIO_ENDPOINT=aia-minio:9092
- MINIO_ACCESS_KEY=${MINIO_ACCESS_KEY:-minioadmin} - MINIO_ACCESS_KEY=${MINIO_ACCESS_KEY:-minioadmin}
- MINIO_SECRET_KEY=${MINIO_SECRET_KEY:-minioadmin} - MINIO_SECRET_KEY=${MINIO_SECRET_KEY:-minioadmin}
- QDRANT_URL=http://qdrant:6333 - QDRANT_URL=http://aia-qdrant:6333
- EVENT_BUS_TYPE=${EVENT_BUS_TYPE:-memory} - EVENT_BUS_TYPE=${EVENT_BUS_TYPE:-memory}
- KAFKA_BOOTSTRAP_SERVERS=${KAFKA_BOOTSTRAP_SERVERS:-} - KAFKA_BOOTSTRAP_SERVERS=${KAFKA_BOOTSTRAP_SERVERS:-}
- NATS_SERVERS=${NATS_SERVERS:-nats://nats:4222} - NATS_SERVERS=${NATS_SERVERS:-nats://aia-nats:4222}
- NATS_STREAM_NAME=${NATS_STREAM_NAME:-TAX_AGENT_EVENTS} - NATS_STREAM_NAME=${NATS_STREAM_NAME:-TAX_AGENT_EVENTS}
- NATS_CONSUMER_GROUP=${NATS_CONSUMER_GROUP:-tax-agent} - NATS_CONSUMER_GROUP=${NATS_CONSUMER_GROUP:-tax-agent}
depends_on: depends_on:
- postgres - aia-postgres
- neo4j - aia-neo4j
- minio - aia-minio
- qdrant - aia-qdrant
- nats - aia-nats
- traefik - aia-traefik
labels: labels:
- "traefik.enable=true" - "traefik.enable=true"
- "traefik.http.routers.svc-hmrc.rule=Host(`api.${DOMAIN:-local.lan}`) && PathPrefix(`/hmrc`)" - "traefik.http.routers.svc-hmrc.rule=Host(`api.${DOMAIN:-local.lan}`) && PathPrefix(`/hmrc`)"
@@ -776,40 +776,40 @@ services:
- "traefik.http.routers.svc-hmrc.middlewares=authentik-forwardauth@file,rate-limit@file" - "traefik.http.routers.svc-hmrc.middlewares=authentik-forwardauth@file,rate-limit@file"
- "traefik.http.services.svc-hmrc.loadbalancer.server.port=8000" - "traefik.http.services.svc-hmrc.loadbalancer.server.port=8000"
svc-normalize-map: aia-svc-normalize-map:
build: build:
context: ../../ context: ../../
dockerfile: apps/svc_normalize_map/Dockerfile dockerfile: apps/svc_normalize_map/Dockerfile
container_name: svc-normalize-map container_name: aia-svc-normalize-map
restart: unless-stopped restart: unless-stopped
networks: networks:
- backend - backend
volumes: volumes:
- ../../config:/app/config:ro - ../../config:/app/config:ro
environment: environment:
- VAULT_ADDR=http://vault:8200 - VAULT_ADDR=http://aia-vault:8200
- VAULT_TOKEN=${VAULT_DEV_ROOT_TOKEN_ID:-root} - VAULT_TOKEN=${VAULT_DEV_ROOT_TOKEN_ID:-root}
- POSTGRES_URL=postgresql://postgres:${POSTGRES_PASSWORD:-postgres}@postgres:5432/tax_system - POSTGRES_URL=postgresql://postgres:${POSTGRES_PASSWORD:-postgres}@aia-postgres:5432/tax_system
- NEO4J_URL=bolt://neo4j:7687 - NEO4J_URL=bolt://aia-neo4j:7687
- NEO4J_USER=neo4j - NEO4J_USER=neo4j
- NEO4J_PASSWORD=${NEO4J_PASSWORD:-password} - NEO4J_PASSWORD=${NEO4J_PASSWORD:-password}
- REDIS_URL=redis://redis:6379 - REDIS_URL=redis://aia-redis:6379
- MINIO_ENDPOINT=minio:9092 - MINIO_ENDPOINT=aia-minio:9092
- MINIO_ACCESS_KEY=${MINIO_ACCESS_KEY:-minioadmin} - MINIO_ACCESS_KEY=${MINIO_ACCESS_KEY:-minioadmin}
- MINIO_SECRET_KEY=${MINIO_SECRET_KEY:-minioadmin} - MINIO_SECRET_KEY=${MINIO_SECRET_KEY:-minioadmin}
- QDRANT_URL=http://qdrant:6333 - QDRANT_URL=http://aia-qdrant:6333
- EVENT_BUS_TYPE=${EVENT_BUS_TYPE:-memory} - EVENT_BUS_TYPE=${EVENT_BUS_TYPE:-memory}
- KAFKA_BOOTSTRAP_SERVERS=${KAFKA_BOOTSTRAP_SERVERS:-} - KAFKA_BOOTSTRAP_SERVERS=${KAFKA_BOOTSTRAP_SERVERS:-}
- NATS_SERVERS=${NATS_SERVERS:-nats://nats:4222} - NATS_SERVERS=${NATS_SERVERS:-nats://aia-nats:4222}
- NATS_STREAM_NAME=${NATS_STREAM_NAME:-TAX_AGENT_EVENTS} - NATS_STREAM_NAME=${NATS_STREAM_NAME:-TAX_AGENT_EVENTS}
- NATS_CONSUMER_GROUP=${NATS_CONSUMER_GROUP:-tax-agent} - NATS_CONSUMER_GROUP=${NATS_CONSUMER_GROUP:-tax-agent}
depends_on: depends_on:
- postgres - aia-postgres
- neo4j - aia-neo4j
- minio - aia-minio
- qdrant - aia-qdrant
- nats - aia-nats
- traefik - aia-traefik
labels: labels:
- "traefik.enable=true" - "traefik.enable=true"
- "traefik.http.routers.svc-normalize-map.rule=Host(`api.${DOMAIN:-local.lan}`) && PathPrefix(`/normalize-map`)" - "traefik.http.routers.svc-normalize-map.rule=Host(`api.${DOMAIN:-local.lan}`) && PathPrefix(`/normalize-map`)"
@@ -818,40 +818,40 @@ services:
- "traefik.http.routers.svc-normalize-map.middlewares=authentik-forwardauth@file,rate-limit@file" - "traefik.http.routers.svc-normalize-map.middlewares=authentik-forwardauth@file,rate-limit@file"
- "traefik.http.services.svc-normalize-map.loadbalancer.server.port=8000" - "traefik.http.services.svc-normalize-map.loadbalancer.server.port=8000"
svc-ocr: aia-svc-ocr:
build: build:
context: ../../ context: ../../
dockerfile: apps/svc_ocr/Dockerfile dockerfile: apps/svc_ocr/Dockerfile
container_name: svc-ocr container_name: aia-svc-ocr
restart: unless-stopped restart: unless-stopped
networks: networks:
- backend - backend
volumes: volumes:
- ../../config:/app/config:ro - ../../config:/app/config:ro
environment: environment:
- VAULT_ADDR=http://vault:8200 - VAULT_ADDR=http://aia-vault:8200
- VAULT_TOKEN=${VAULT_DEV_ROOT_TOKEN_ID:-root} - VAULT_TOKEN=${VAULT_DEV_ROOT_TOKEN_ID:-root}
- POSTGRES_URL=postgresql://postgres:${POSTGRES_PASSWORD:-postgres}@postgres:5432/tax_system - POSTGRES_URL=postgresql://postgres:${POSTGRES_PASSWORD:-postgres}@aia-postgres:5432/tax_system
- NEO4J_URL=bolt://neo4j:7687 - NEO4J_URL=bolt://aia-neo4j:7687
- NEO4J_USER=neo4j - NEO4J_USER=neo4j
- NEO4J_PASSWORD=${NEO4J_PASSWORD:-password} - NEO4J_PASSWORD=${NEO4J_PASSWORD:-password}
- REDIS_URL=redis://redis:6379 - REDIS_URL=redis://aia-redis:6379
- MINIO_ENDPOINT=minio:9092 - MINIO_ENDPOINT=aia-minio:9092
- MINIO_ACCESS_KEY=${MINIO_ACCESS_KEY:-minioadmin} - MINIO_ACCESS_KEY=${MINIO_ACCESS_KEY:-minioadmin}
- MINIO_SECRET_KEY=${MINIO_SECRET_KEY:-minioadmin} - MINIO_SECRET_KEY=${MINIO_SECRET_KEY:-minioadmin}
- QDRANT_URL=http://qdrant:6333 - QDRANT_URL=http://aia-qdrant:6333
- EVENT_BUS_TYPE=${EVENT_BUS_TYPE:-memory} - EVENT_BUS_TYPE=${EVENT_BUS_TYPE:-memory}
- KAFKA_BOOTSTRAP_SERVERS=${KAFKA_BOOTSTRAP_SERVERS:-} - KAFKA_BOOTSTRAP_SERVERS=${KAFKA_BOOTSTRAP_SERVERS:-}
- NATS_SERVERS=${NATS_SERVERS:-nats://nats:4222} - NATS_SERVERS=${NATS_SERVERS:-nats://aia-nats:4222}
- NATS_STREAM_NAME=${NATS_STREAM_NAME:-TAX_AGENT_EVENTS} - NATS_STREAM_NAME=${NATS_STREAM_NAME:-TAX_AGENT_EVENTS}
- NATS_CONSUMER_GROUP=${NATS_CONSUMER_GROUP:-tax-agent} - NATS_CONSUMER_GROUP=${NATS_CONSUMER_GROUP:-tax-agent}
depends_on: depends_on:
- postgres - aia-postgres
- neo4j - aia-neo4j
- minio - aia-minio
- qdrant - aia-qdrant
- nats - aia-nats
- traefik - aia-traefik
labels: labels:
- "traefik.enable=true" - "traefik.enable=true"
- "traefik.http.routers.svc-ocr.rule=Host(`api.${DOMAIN:-local.lan}`) && PathPrefix(`/ocr`)" - "traefik.http.routers.svc-ocr.rule=Host(`api.${DOMAIN:-local.lan}`) && PathPrefix(`/ocr`)"
@@ -860,40 +860,40 @@ services:
- "traefik.http.routers.svc-ocr.middlewares=authentik-forwardauth@file,rate-limit@file" - "traefik.http.routers.svc-ocr.middlewares=authentik-forwardauth@file,rate-limit@file"
- "traefik.http.services.svc-ocr.loadbalancer.server.port=8000" - "traefik.http.services.svc-ocr.loadbalancer.server.port=8000"
svc-rag-indexer: aia-svc-rag-indexer:
build: build:
context: ../../ context: ../../
dockerfile: apps/svc_rag_indexer/Dockerfile dockerfile: apps/svc_rag_indexer/Dockerfile
container_name: svc-rag-indexer container_name: aia-svc-rag-indexer
restart: unless-stopped restart: unless-stopped
networks: networks:
- backend - backend
volumes: volumes:
- ../../config:/app/config:ro - ../../config:/app/config:ro
environment: environment:
- VAULT_ADDR=http://vault:8200 - VAULT_ADDR=http://aia-vault:8200
- VAULT_TOKEN=${VAULT_DEV_ROOT_TOKEN_ID:-root} - VAULT_TOKEN=${VAULT_DEV_ROOT_TOKEN_ID:-root}
- POSTGRES_URL=postgresql://postgres:${POSTGRES_PASSWORD:-postgres}@postgres:5432/tax_system - POSTGRES_URL=postgresql://postgres:${POSTGRES_PASSWORD:-postgres}@aia-postgres:5432/tax_system
- NEO4J_URL=bolt://neo4j:7687 - NEO4J_URL=bolt://aia-neo4j:7687
- NEO4J_USER=neo4j - NEO4J_USER=neo4j
- NEO4J_PASSWORD=${NEO4J_PASSWORD:-password} - NEO4J_PASSWORD=${NEO4J_PASSWORD:-password}
- REDIS_URL=redis://redis:6379 - REDIS_URL=redis://aia-redis:6379
- MINIO_ENDPOINT=minio:9092 - MINIO_ENDPOINT=aia-minio:9092
- MINIO_ACCESS_KEY=${MINIO_ACCESS_KEY:-minioadmin} - MINIO_ACCESS_KEY=${MINIO_ACCESS_KEY:-minioadmin}
- MINIO_SECRET_KEY=${MINIO_SECRET_KEY:-minioadmin} - MINIO_SECRET_KEY=${MINIO_SECRET_KEY:-minioadmin}
- QDRANT_URL=http://qdrant:6333 - QDRANT_URL=http://aia-qdrant:6333
- EVENT_BUS_TYPE=${EVENT_BUS_TYPE:-memory} - EVENT_BUS_TYPE=${EVENT_BUS_TYPE:-memory}
- KAFKA_BOOTSTRAP_SERVERS=${KAFKA_BOOTSTRAP_SERVERS:-} - KAFKA_BOOTSTRAP_SERVERS=${KAFKA_BOOTSTRAP_SERVERS:-}
- NATS_SERVERS=${NATS_SERVERS:-nats://nats:4222} - NATS_SERVERS=${NATS_SERVERS:-nats://aia-nats:4222}
- NATS_STREAM_NAME=${NATS_STREAM_NAME:-TAX_AGENT_EVENTS} - NATS_STREAM_NAME=${NATS_STREAM_NAME:-TAX_AGENT_EVENTS}
- NATS_CONSUMER_GROUP=${NATS_CONSUMER_GROUP:-tax-agent} - NATS_CONSUMER_GROUP=${NATS_CONSUMER_GROUP:-tax-agent}
depends_on: depends_on:
- postgres - aia-postgres
- neo4j - aia-neo4j
- minio - aia-minio
- qdrant - aia-qdrant
- nats - aia-nats
- traefik - aia-traefik
labels: labels:
- "traefik.enable=true" - "traefik.enable=true"
- "traefik.http.routers.svc-rag-indexer.rule=Host(`api.${DOMAIN:-.lan}`) && PathPrefix(`/rag-indexer`)" - "traefik.http.routers.svc-rag-indexer.rule=Host(`api.${DOMAIN:-.lan}`) && PathPrefix(`/rag-indexer`)"
@@ -902,41 +902,41 @@ services:
- "traefik.http.routers.svc-rag-indexer.middlewares=authentik-forwardauth@file,rate-limit@file" - "traefik.http.routers.svc-rag-indexer.middlewares=authentik-forwardauth@file,rate-limit@file"
- "traefik.http.services.svc-rag-indexer.loadbalancer.server.port=8000" - "traefik.http.services.svc-rag-indexer.loadbalancer.server.port=8000"
svc-reason: aia-svc-reason:
build: build:
context: ../../ context: ../../
dockerfile: apps/svc_reason/Dockerfile dockerfile: apps/svc_reason/Dockerfile
container_name: svc-reason container_name: aia-svc-reason
restart: unless-stopped restart: unless-stopped
networks: networks:
- backend - backend
volumes: volumes:
- ../../config:/app/config:ro - ../../config:/app/config:ro
environment: environment:
- VAULT_ADDR=http://vault:8200 - VAULT_ADDR=http://aia-vault:8200
- VAULT_TOKEN=${VAULT_DEV_ROOT_TOKEN_ID:-root} - VAULT_TOKEN=${VAULT_DEV_ROOT_TOKEN_ID:-root}
- POSTGRES_URL=postgresql://postgres:${POSTGRES_PASSWORD:-postgres}@postgres:5432/tax_system - POSTGRES_URL=postgresql://postgres:${POSTGRES_PASSWORD:-postgres}@aia-postgres:5432/tax_system
- NEO4J_URL=bolt://neo4j:7687 - NEO4J_URL=bolt://aia-neo4j:7687
- NEO4J_USER=neo4j - NEO4J_USER=neo4j
- NEO4J_PASSWORD=${NEO4J_PASSWORD:-password} - NEO4J_PASSWORD=${NEO4J_PASSWORD:-password}
- REDIS_URL=redis://redis:6379 - REDIS_URL=redis://aia-redis:6379
- MINIO_ENDPOINT=minio:9092 - MINIO_ENDPOINT=aia-minio:9092
- MINIO_ACCESS_KEY=${MINIO_ACCESS_KEY:-minioadmin} - MINIO_ACCESS_KEY=${MINIO_ACCESS_KEY:-minioadmin}
- MINIO_SECRET_KEY=${MINIO_SECRET_KEY:-minioadmin} - MINIO_SECRET_KEY=${MINIO_SECRET_KEY:-minioadmin}
- QDRANT_URL=http://qdrant:6333 - QDRANT_URL=http://aia-qdrant:6333
- EVENT_BUS_TYPE=${EVENT_BUS_TYPE:-memory} - EVENT_BUS_TYPE=${EVENT_BUS_TYPE:-memory}
- KAFKA_BOOTSTRAP_SERVERS=${KAFKA_BOOTSTRAP_SERVERS:-} - KAFKA_BOOTSTRAP_SERVERS=${KAFKA_BOOTSTRAP_SERVERS:-}
- NATS_SERVERS=${NATS_SERVERS:-nats://nats:4222} - NATS_SERVERS=${NATS_SERVERS:-nats://aia-nats:4222}
- NATS_STREAM_NAME=${NATS_STREAM_NAME:-TAX_AGENT_EVENTS} - NATS_STREAM_NAME=${NATS_STREAM_NAME:-TAX_AGENT_EVENTS}
- NATS_CONSUMER_GROUP=${NATS_CONSUMER_GROUP:-tax-agent} - NATS_CONSUMER_GROUP=${NATS_CONSUMER_GROUP:-tax-agent}
depends_on: depends_on:
- postgres - aia-postgres
- neo4j - aia-neo4j
- minio - aia-minio
- qdrant - aia-qdrant
- nats - aia-nats
- traefik - aia-traefik
labels: labels:
- "traefik.enable=true" - "traefik.enable=true"
@@ -946,40 +946,40 @@ services:
- "traefik.http.routers.svc-reason.middlewares=authentik-forwardauth@file,rate-limit@file" - "traefik.http.routers.svc-reason.middlewares=authentik-forwardauth@file,rate-limit@file"
- "traefik.http.services.svc-reason.loadbalancer.server.port=8000" - "traefik.http.services.svc-reason.loadbalancer.server.port=8000"
svc-rpa: aia-svc-rpa:
build: build:
context: ../../ context: ../../
dockerfile: apps/svc_rpa/Dockerfile dockerfile: apps/svc_rpa/Dockerfile
container_name: svc-rpa container_name: aia-svc-rpa
restart: unless-stopped restart: unless-stopped
networks: networks:
- backend - backend
volumes: volumes:
- ../../config:/app/config:ro - ../../config:/app/config:ro
environment: environment:
- VAULT_ADDR=http://vault:8200 - VAULT_ADDR=http://aia-vault:8200
- VAULT_TOKEN=${VAULT_DEV_ROOT_TOKEN_ID:-root} - VAULT_TOKEN=${VAULT_DEV_ROOT_TOKEN_ID:-root}
- POSTGRES_URL=postgresql://postgres:${POSTGRES_PASSWORD:-postgres}@postgres:5432/tax_system - POSTGRES_URL=postgresql://postgres:${POSTGRES_PASSWORD:-postgres}@aia-postgres:5432/tax_system
- NEO4J_URL=bolt://neo4j:7687 - NEO4J_URL=bolt://aia-neo4j:7687
- NEO4J_USER=neo4j - NEO4J_USER=neo4j
- NEO4J_PASSWORD=${NEO4J_PASSWORD:-password} - NEO4J_PASSWORD=${NEO4J_PASSWORD:-password}
- REDIS_URL=redis://redis:6379 - REDIS_URL=redis://aia-redis:6379
- MINIO_ENDPOINT=minio:9092 - MINIO_ENDPOINT=aia-minio:9092
- MINIO_ACCESS_KEY=${MINIO_ACCESS_KEY:-minioadmin} - MINIO_ACCESS_KEY=${MINIO_ACCESS_KEY:-minioadmin}
- MINIO_SECRET_KEY=${MINIO_SECRET_KEY:-minioadmin} - MINIO_SECRET_KEY=${MINIO_SECRET_KEY:-minioadmin}
- QDRANT_URL=http://qdrant:6333 - QDRANT_URL=http://aia-qdrant:6333
- EVENT_BUS_TYPE=${EVENT_BUS_TYPE:-memory} - EVENT_BUS_TYPE=${EVENT_BUS_TYPE:-memory}
- KAFKA_BOOTSTRAP_SERVERS=${KAFKA_BOOTSTRAP_SERVERS:-} - KAFKA_BOOTSTRAP_SERVERS=${KAFKA_BOOTSTRAP_SERVERS:-}
- NATS_SERVERS=${NATS_SERVERS:-nats://nats:4222} - NATS_SERVERS=${NATS_SERVERS:-nats://aia-nats:4222}
- NATS_STREAM_NAME=${NATS_STREAM_NAME:-TAX_AGENT_EVENTS} - NATS_STREAM_NAME=${NATS_STREAM_NAME:-TAX_AGENT_EVENTS}
- NATS_CONSUMER_GROUP=${NATS_CONSUMER_GROUP:-tax-agent} - NATS_CONSUMER_GROUP=${NATS_CONSUMER_GROUP:-tax-agent}
depends_on: depends_on:
- postgres - aia-postgres
- neo4j - aia-neo4j
- minio - aia-minio
- qdrant - aia-qdrant
- nats - aia-nats
- traefik - aia-traefik
labels: labels:
- "traefik.enable=true" - "traefik.enable=true"
- "traefik.http.routers.svc-rpa.rule=Host(`api.${DOMAIN:-local.lan}`) && PathPrefix(`/rpa`)" - "traefik.http.routers.svc-rpa.rule=Host(`api.${DOMAIN:-local.lan}`) && PathPrefix(`/rpa`)"
@@ -988,11 +988,11 @@ services:
- "traefik.http.routers.svc-rpa.middlewares=authentik-forwardauth@file,rate-limit@file" - "traefik.http.routers.svc-rpa.middlewares=authentik-forwardauth@file,rate-limit@file"
- "traefik.http.services.svc-rpa.loadbalancer.server.port=8000" - "traefik.http.services.svc-rpa.loadbalancer.server.port=8000"
ui-review: aia-ui-review:
build: build:
context: ../../ui-review context: ../../ui-review
dockerfile: Dockerfile dockerfile: Dockerfile
container_name: ui-review container_name: aia-ui-review
restart: unless-stopped restart: unless-stopped
networks: networks:
- frontend - frontend
@@ -1001,7 +1001,7 @@ services:
- NEXTAUTH_SECRET=${NEXTAUTH_SECRET:-changeme} - NEXTAUTH_SECRET=${NEXTAUTH_SECRET:-changeme}
- API_BASE_URL=https://api.${DOMAIN:-local.lan} - API_BASE_URL=https://api.${DOMAIN:-local.lan}
depends_on: depends_on:
- traefik - aia-traefik
labels: labels:
- "traefik.docker.network=ai-tax-agent-frontend" - "traefik.docker.network=ai-tax-agent-frontend"
- "traefik.enable=true" - "traefik.enable=true"

View File

@@ -1,63 +0,0 @@
---
services:
gitea-server:
image: docker.io/gitea/gitea:1.24.5
container_name: gitea-server
env_file:
- ./.env # contains the GoDaddy API Key and Secret
environment:
- USER_UID=1000
- USER_GID=1000
- GITEA__database__DB_TYPE=postgres
- GITEA__database__HOST=${POSTGRES_HOST:-gitea-postgres}:${POSTGRES_PORT:-5432}
- GITEA__database__NAME=${POSTGRES_DB:-gitea}
- GITEA__database__USER=${POSTGRES_USER:-gitea}
- GITEA__database__PASSWD=${POSTGRES_PASSWORD:?POSTGRES_PASSWORD not set}
- GITEA__server__SSH_PORT=2221 # <-- (Optional) Replace with your desired SSH port
- GITEA__server__ROOT_URL=https://gitea.harkon.co.uk # <-- Replace with your FQDN
networks:
- frontend
- backend
volumes:
- gitea-data:/data
- /etc/timezone:/etc/timezone:ro
- /etc/localtime:/etc/localtime:ro
ports:
- "2221:22" # <-- (Optional) Replace with your desired SSH port
depends_on:
- gitea-postgres
labels:
- traefik.enable=true
- traefik.http.services.gitea.loadbalancer.server.port=3000
- traefik.http.services.gitea.loadbalancer.server.scheme=http
- traefik.http.routers.gitea-https.entrypoints=websecure
- traefik.http.routers.gitea-https.rule=Host(`gitea.harkon.co.uk`) # <-- Replace with your FQDN
- traefik.http.routers.gitea-https.tls=true
- traefik.http.routers.gitea-https.tls.certresolver=godaddy # <-- Replace with your certresolver
- traefik.http.routers.gitea.service=gitea
restart: unless-stopped
gitea-postgres:
image: docker.io/library/postgres:17.5
container_name: gitea-postgres
environment:
- POSTGRES_USER=${POSTGRES_USER:-gitea}
- POSTGRES_PASSWORD=${POSTGRES_PASSWORD:?POSTGRES_PASSWORD not set}
- POSTGRES_DB=${POSTGRES_DB:-gitea}
networks:
- backend
volumes:
- gitea-db:/var/lib/postgresql/data
restart: unless-stopped
volumes:
gitea-data:
driver: local
gitea-db:
driver: local
networks:
frontend:
external: true
backend:
external: true

View File

@@ -1,104 +0,0 @@
# /opt/compose/nextcloud/compose.yml
networks:
frontend:
external: true
backend:
external: true
volumes:
nextcloud_html:
nextcloud_data:
nextcloud_config:
nextcloud_apps:
nextcloud_postgres:
nextcloud_redis:
services:
nextcloud-postgres:
image: postgres:16-alpine
container_name: nextcloud-postgres
restart: unless-stopped
environment:
POSTGRES_DB: nextcloud
POSTGRES_USER: nextcloud
POSTGRES_PASSWORD: ${NEXTCLOUD_DB_PASSWORD}
volumes:
- nextcloud_postgres:/var/lib/postgresql/data
networks: [backend]
nextcloud-redis:
image: redis:7-alpine
container_name: nextcloud-redis
restart: unless-stopped
command:
[
"redis-server",
"--appendonly",
"yes",
"--requirepass",
"${REDIS_PASSWORD}",
]
volumes:
- nextcloud_redis:/data
networks: [backend]
nextcloud-server:
image: nextcloud:apache
container_name: nextcloud-server
restart: unless-stopped
depends_on: [nextcloud-postgres, nextcloud-redis]
env_file:
- ./.env
environment:
# DB
POSTGRES_DB: nextcloud
POSTGRES_USER: nextcloud
POSTGRES_PASSWORD: ${NEXTCLOUD_DB_PASSWORD}
POSTGRES_HOST: nextcloud-postgres
# Initial admin (used only on first run)
NEXTCLOUD_ADMIN_USER: ${NEXTCLOUD_ADMIN_USER}
NEXTCLOUD_ADMIN_PASSWORD: ${NEXTCLOUD_ADMIN_PASSWORD}
# Reverse frontend awareness
NEXTCLOUD_TRUSTED_DOMAINS: cloud.harkon.co.uk
OVERWRITEHOST: cloud.harkon.co.uk
OVERWRITEPROTOCOL: https
# Redis for locks/cache
REDIS_HOST: nextcloud-redis
REDIS_HOST_PASSWORD: ${REDIS_PASSWORD}
volumes:
- nextcloud_html:/var/www/html
- nextcloud_data:/var/www/html/data
- nextcloud_config:/var/www/html/config
- nextcloud_apps:/var/www/html/custom_apps
networks:
- frontend # for Traefik
- backend # for DB/Redis
labels:
- traefik.enable=true
- traefik.http.routers.nextcloud.rule=Host(`cloud.harkon.co.uk`)
- traefik.http.routers.nextcloud.entrypoints=websecure
- traefik.http.routers.nextcloud.tls=true
- traefik.http.routers.nextcloud.tls.certresolver=godaddy
- traefik.http.services.nextcloud.loadbalancer.server.port=80
- traefik.http.routers.nextcloud.service=nextcloud
# Run background jobs as a separate container
cron:
image: nextcloud:apache
container_name: nextcloud-cron
restart: unless-stopped
depends_on: [nc_db, nc_redis]
entrypoint: /cron.sh
environment:
POSTGRES_DB: nextcloud
POSTGRES_USER: nextcloud
POSTGRES_PASSWORD: ${NEXTCLOUD_DB_PASSWORD}
POSTGRES_HOST: db
REDIS_HOST: redis
REDIS_HOST_PASSWORD: ${REDIS_PASSWORD}
volumes:
- nextcloud_html:/var/www/html
- nextcloud_data:/var/www/html/data
- nextcloud_config:/var/www/html/config
- nextcloud_apps:/var/www/html/custom_apps
networks: [backend]

View File

@@ -1,27 +0,0 @@
---
services:
app:
container_name: portainer
image: docker.io/portainer/portainer-ce:2.33.1-alpine
volumes:
- /run/docker.sock:/var/run/docker.sock
- portainer-data:/data
labels:
- traefik.enable=true
- traefik.http.services.portainer.loadbalancer.server.port=9000
- traefik.http.routers.portainer.service=portainer
- traefik.http.routers.portainer.entrypoints=websecure
- traefik.http.routers.portainer.rule=Host(`portainer.harkon.co.uk`)
- traefik.http.routers.portainer.tls=true
- traefik.http.routers.portainer.tls.certresolver=godaddy
networks:
- frontend
restart: unless-stopped
volumes:
portainer-data:
driver: local
networks:
frontend:
external: true

View File

@@ -1,39 +0,0 @@
# FILE: infra/compose/traefik/compose.yaml
# there is another traefik instance in the infra used by the application.
# Current instance used for company services on the dev environment.
# TODO: Unify the two traefik instances.
---
services:
traefik:
image: docker.io/library/traefik:v3.5.1
container_name: traefik
ports:
- 80:80
- 443:443
# --> (Optional) Enable Dashboard, don't do in production
# - 8080:8080
# <--
volumes:
- /run/docker.sock:/run/docker.sock:ro
- ./config/:/etc/traefik/:ro
- ./certs/:/var/traefik/certs/:rw
environment:
- CF_DNS_API_TOKEN=your-cloudflare-api-token # <-- Change this to your Cloudflare API Token
env_file:
- ./.provider.env # contains the GoDaddy API Key and Secret
networks:
- frontend
restart: unless-stopped
labels:
- traefik.enable=true
- traefik.http.middlewares.basicauth.basicauth.users=admin:$2y$05$/B2hjJGytCjjMK4Rah1/aeJofBrzqEnAVoZCMKKwetS9mgmck.MVS
- traefik.http.routers.traefik.rule=Host(`traefik.harkon.co.uk`)
- traefik.http.routers.traefik.entrypoints=websecure
- traefik.http.routers.traefik.tls.certresolver=le
- traefik.http.routers.traefik.middlewares=basicauth@docker
- traefik.http.routers.traefik.service=api@internal
networks:
frontend:
external: true # <-- (Optional) Change this to false if you want to create a new network
#

View File

@@ -1,21 +0,0 @@
# --> (Example) Expose an external service using Traefik...
# http:
# # -- Change Router Configuration here...
# routers:
# your-local-router:
# rule: "Host(`your-local-service.your-domain.com`)" # <-- Change Rules here...
# service: your-local-service # <-- Change Service Name here...
# priority: 1000 # <-- (Optional) Change Routing Priority here...
# entryPoints:
# - web
# - websecure
# tls:
# certResolver: cloudflare
#
# # -- Change Service Configuration here...
# services:
# your-local-service: # <-- Change Service Name here...
# loadBalancer:
# servers:
# - url: "http://your-local-service:port" # <-- Change Target Service URL here...
# <--

View File

@@ -1,22 +0,0 @@
# --> (Optional) When using Passbolt with Traefik...
# http:
# middlewares:
# passbolt-middleware:
# headers:
# FrameDeny: true
# AccessControlAllowMethods: 'GET,OPTIONS,PUT'
# AccessControlAllowOriginList:
# - origin-list-or-null
# AccessControlMaxAge: 100
# AddVaryHeader: true
# BrowserXssFilter: true
# ContentTypeNosniff: true
# ForceSTSHeader: true
# STSIncludeSubdomains: true
# STSPreload: true
# ContentSecurityPolicy: default-src 'self' 'unsafe-inline'
# CustomFrameOptionsValue: SAMEORIGIN
# ReferrerPolicy: same-origin
# PermissionsPolicy: vibrate 'self'
# STSSeconds: 315360000
# <--

View File

@@ -1,18 +0,0 @@
# --> (Example) Change TLS Configuration here...
# tls:
# options:
# default:
# minVersion: VersionTLS12
# sniStrict: true
# curvePreferences:
# - CurveP256
# - CurveP384
# - CurveP521
# cipherSuites:
# - TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256
# - TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384
# - TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305_SHA256
# - TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384
# - TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256
# - TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305
# <--

View File

@@ -1,64 +0,0 @@
---
global:
checkNewVersion: false
sendAnonymousUsage: false
# --> (Optional) Change log level and format here ...
# - level: [TRACE, DEBUG, INFO, WARN, ERROR, FATAL]
log:
level: DEBUG
# <--
# --> (Optional) Enable accesslog here ...
accesslog: {}
# <--
# --> (Optional) Enable API and Dashboard here, don't do in production
api:
dashboard: true
insecure: true
# <--
# -- Change EntryPoints here...
entryPoints:
web:
address: :80
# --> (Optional) Redirect all HTTP to HTTPS
http:
redirections:
entryPoint:
to: websecure
scheme: https
# <--
websecure:
address: :443
# -- Configure your CertificateResolver here...
certificatesResolvers:
godaddy:
acme:
email: info@harkon.co.uk
storage: /var/traefik/certs/godaddy-acme.json
caServer: "https://acme-v02.api.letsencrypt.org/directory"
dnsChallenge:
provider: godaddy
resolvers:
- 1.1.1.1:53
- 8.8.8.8:53
- 97.74.103.44:53
- 173.201.71.44:53
# --> (Optional) Disable TLS Cert verification check
# serversTransport:
# insecureSkipVerify: true
# <--
providers:
docker:
exposedByDefault: false # <-- (Optional) Change this to true if you want to expose all services
# Specify discovery network - This ensures correct name resolving and possible issues with containers, that are in multiple networks.
# E.g. Database container in a separate network and a container in the frontend and database network.
network: frontend
file:
directory: /etc/traefik
watch: true

View File

@@ -6,6 +6,8 @@
# Domain Configuration # Domain Configuration
DOMAIN=dev.harkon.co.uk DOMAIN=dev.harkon.co.uk
EMAIL=dev@harkon.co.uk EMAIL=dev@harkon.co.uk
# ACME email for Traefik certificate resolver
ACME_EMAIL=dev@harkon.co.uk
# Database Passwords (CHANGE THESE!) # Database Passwords (CHANGE THESE!)
POSTGRES_PASSWORD=CHANGE_ME_POSTGRES_PASSWORD POSTGRES_PASSWORD=CHANGE_ME_POSTGRES_PASSWORD
@@ -71,4 +73,3 @@ REGISTRY_USER=harkon
REGISTRY_PASSWORD=CHANGE_ME_GITEA_TOKEN REGISTRY_PASSWORD=CHANGE_ME_GITEA_TOKEN
IMAGE_TAG=dev IMAGE_TAG=dev
OWNER=harkon OWNER=harkon
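
The new `ACME_EMAIL` value only matters if the Traefik service interpolates it into its certificate-resolver configuration rather than keeping a hard-coded address. One way to confirm the wiring, assuming the application stack lives under `infra/compose` and reads this env file (both the compose file name and the paths here are assumptions):

```bash
# Render the interpolated configuration and check where the ACME email ends up.
docker compose --env-file infra/environments/development/.env \
  -f infra/compose/docker-compose.yml config | grep -i acme
```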

View File

@@ -5,6 +5,8 @@
# Domain Configuration # Domain Configuration
DOMAIN=localhost DOMAIN=localhost
EMAIL=dev@localhost EMAIL=dev@localhost
# ACME email for Traefik certificate resolver (optional for local)
ACME_EMAIL=dev@localhost
# Database Passwords (use simple passwords for local) # Database Passwords (use simple passwords for local)
POSTGRES_PASSWORD=postgres POSTGRES_PASSWORD=postgres
@@ -69,4 +71,3 @@ REGISTRY_USER=admin
REGISTRY_PASSWORD=admin123 REGISTRY_PASSWORD=admin123
IMAGE_TAG=latest IMAGE_TAG=latest
OWNER=local OWNER=local

View File

@@ -6,6 +6,8 @@
# Domain Configuration # Domain Configuration
DOMAIN=harkon.co.uk DOMAIN=harkon.co.uk
EMAIL=info@harkon.co.uk EMAIL=info@harkon.co.uk
# ACME email for Traefik certificate resolver
ACME_EMAIL=ops@harkon.co.uk
# Database Passwords (CHANGE THESE!) # Database Passwords (CHANGE THESE!)
POSTGRES_PASSWORD=CHANGE_ME_POSTGRES_PASSWORD POSTGRES_PASSWORD=CHANGE_ME_POSTGRES_PASSWORD
@@ -71,4 +73,3 @@ REGISTRY_USER=harkon
REGISTRY_PASSWORD=CHANGE_ME_GITEA_TOKEN REGISTRY_PASSWORD=CHANGE_ME_GITEA_TOKEN
IMAGE_TAG=v1.0.1 IMAGE_TAG=v1.0.1
OWNER=harkon OWNER=harkon

View File

@@ -182,14 +182,14 @@ deploy_all() {
log_info "Deploying all stacks..." log_info "Deploying all stacks..."
# Check if networks exist # Check if networks exist
if ! docker network inspect frontend >/dev/null 2>&1; then if ! docker network inspect apa-frontend >/dev/null 2>&1; then
log_warning "Network 'frontend' does not exist. Creating..." log_warning "Network 'apa-frontend' does not exist. Creating..."
docker network create frontend docker network create apa-frontend
fi fi
if ! docker network inspect backend >/dev/null 2>&1; then if ! docker network inspect apa-backend >/dev/null 2>&1; then
log_warning "Network 'backend' does not exist. Creating..." log_warning "Network 'apa-backend' does not exist. Creating..."
docker network create backend docker network create apa-backend
fi fi
# Deploy in order # Deploy in order
@@ -238,4 +238,3 @@ case $STACK in
esac esac
log_success "Deployment complete!" log_success "Deployment complete!"

View File

@@ -1,178 +0,0 @@
#!/bin/bash
# Script to reorganize infrastructure from old structure to new structure
# This is a helper script to move files around
set -e
# Colors
GREEN='\033[0;32m'
YELLOW='\033[1;33m'
BLUE='\033[0;34m'
RED='\033[0;31m'
NC='\033[0m'
log_info() {
echo -e "${BLUE} $1${NC}"
}
log_success() {
echo -e "${GREEN}$1${NC}"
}
log_warning() {
echo -e "${YELLOW}⚠️ $1${NC}"
}
log_error() {
echo -e "${RED}$1${NC}"
}
# Script directory
SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
INFRA_DIR="$(dirname "$SCRIPT_DIR")"
PROJECT_ROOT="$(dirname "$INFRA_DIR")"
log_info "Reorganizing infrastructure structure..."
echo " Infra Dir: $INFRA_DIR"
echo ""
# Step 1: Create directory structure (already done by mkdir command)
log_info "Step 1: Verifying directory structure..."
if [ -d "$INFRA_DIR/base" ] && [ -d "$INFRA_DIR/environments" ]; then
log_success "Directory structure exists"
else
log_error "Directory structure not found. Run: mkdir -p infra/{base,environments/{local,development,production},configs/{traefik,grafana,prometheus,loki,vault,authentik},certs/{local,development,production}}"
exit 1
fi
# Step 2: Move config files
log_info "Step 2: Moving configuration files..."
# Traefik configs
if [ -d "$INFRA_DIR/traefik" ] && [ ! -f "$INFRA_DIR/configs/traefik/.moved" ]; then
log_info " Moving Traefik configs..."
cp -r "$INFRA_DIR/traefik/"* "$INFRA_DIR/configs/traefik/" 2>/dev/null || true
touch "$INFRA_DIR/configs/traefik/.moved"
log_success " Traefik configs moved"
fi
# Grafana configs
if [ -d "$INFRA_DIR/grafana" ] && [ ! -f "$INFRA_DIR/configs/grafana/.moved" ]; then
log_info " Moving Grafana configs..."
cp -r "$INFRA_DIR/grafana/"* "$INFRA_DIR/configs/grafana/" 2>/dev/null || true
touch "$INFRA_DIR/configs/grafana/.moved"
log_success " Grafana configs moved"
fi
# Prometheus configs
if [ -d "$INFRA_DIR/prometheus" ] && [ ! -f "$INFRA_DIR/configs/prometheus/.moved" ]; then
log_info " Moving Prometheus configs..."
cp -r "$INFRA_DIR/prometheus/"* "$INFRA_DIR/configs/prometheus/" 2>/dev/null || true
touch "$INFRA_DIR/configs/prometheus/.moved"
log_success " Prometheus configs moved"
fi
# Loki configs
if [ -d "$INFRA_DIR/loki" ] && [ ! -f "$INFRA_DIR/configs/loki/.moved" ]; then
log_info " Moving Loki configs..."
cp -r "$INFRA_DIR/loki/"* "$INFRA_DIR/configs/loki/" 2>/dev/null || true
touch "$INFRA_DIR/configs/loki/.moved"
log_success " Loki configs moved"
fi
# Promtail configs
if [ -d "$INFRA_DIR/promtail" ] && [ ! -f "$INFRA_DIR/configs/promtail/.moved" ]; then
log_info " Moving Promtail configs..."
mkdir -p "$INFRA_DIR/configs/promtail"
cp -r "$INFRA_DIR/promtail/"* "$INFRA_DIR/configs/promtail/" 2>/dev/null || true
touch "$INFRA_DIR/configs/promtail/.moved"
log_success " Promtail configs moved"
fi
# Vault configs
if [ -d "$INFRA_DIR/vault" ] && [ ! -f "$INFRA_DIR/configs/vault/.moved" ]; then
log_info " Moving Vault configs..."
cp -r "$INFRA_DIR/vault/"* "$INFRA_DIR/configs/vault/" 2>/dev/null || true
touch "$INFRA_DIR/configs/vault/.moved"
log_success " Vault configs moved"
fi
# Authentik configs
if [ -d "$INFRA_DIR/authentik" ] && [ ! -f "$INFRA_DIR/configs/authentik/.moved" ]; then
log_info " Moving Authentik configs..."
cp -r "$INFRA_DIR/authentik/"* "$INFRA_DIR/configs/authentik/" 2>/dev/null || true
touch "$INFRA_DIR/configs/authentik/.moved"
log_success " Authentik configs moved"
fi
# Step 3: Move certificates
log_info "Step 3: Moving certificates..."
if [ -d "$INFRA_DIR/certs" ] && [ -f "$INFRA_DIR/certs/local.crt" ]; then
log_info " Moving local certificates..."
cp "$INFRA_DIR/certs/local.crt" "$INFRA_DIR/certs/local/" 2>/dev/null || true
cp "$INFRA_DIR/certs/local.key" "$INFRA_DIR/certs/local/" 2>/dev/null || true
log_success " Certificates moved"
fi
# Step 4: Update base compose files paths
log_info "Step 4: Updating base compose file paths..."
# Update infrastructure.yaml
if [ -f "$INFRA_DIR/base/infrastructure.yaml" ]; then
log_info " Updating infrastructure.yaml paths..."
# This would require sed commands to update volume paths
# For now, just log that manual update may be needed
log_warning " Manual review recommended for volume paths"
fi
# Step 5: Create .gitignore for sensitive files
log_info "Step 5: Creating .gitignore..."
cat > "$INFRA_DIR/.gitignore" << 'EOF'
# Environment files (contain secrets)
environments/*/.env
!environments/*/.env.example
# Certificates
certs/*/
!certs/.gitkeep
# Traefik provider credentials
configs/traefik/.provider.env
# Backup files
*.backup
*.tmp
# Docker volumes (if mounted locally)
volumes/
# Logs
*.log
EOF
log_success ".gitignore created"
# Step 6: Create .gitkeep files
log_info "Step 6: Creating .gitkeep files..."
touch "$INFRA_DIR/certs/local/.gitkeep"
touch "$INFRA_DIR/certs/development/.gitkeep"
touch "$INFRA_DIR/certs/production/.gitkeep"
log_success ".gitkeep files created"
# Step 7: Summary
echo ""
log_success "Reorganization complete!"
echo ""
log_info "Next steps:"
echo " 1. Review moved files in configs/ directory"
echo " 2. Update compose file paths if needed"
echo " 3. Create environment files:"
echo " cp infra/environments/local/.env.example infra/environments/local/.env"
echo " cp infra/environments/development/.env.example infra/environments/development/.env"
echo " 4. Test deployment:"
echo " ./infra/scripts/deploy.sh local infrastructure"
echo ""
log_warning "Old directories (traefik/, grafana/, etc.) are preserved for safety"
log_warning "You can remove them after verifying the new structure works"
echo ""

View File

@@ -1,7 +1,7 @@
#!/bin/bash #!/bin/bash
# Setup Docker Networks for AI Tax Agent # Setup Docker Networks for AI Tax Agent (production/dev stacks)
# Creates frontend and backend networks if they don't exist # Creates apa-frontend and apa-backend networks if they don't exist
set -e set -e
@@ -26,23 +26,22 @@ log_warning() {
log_info "Setting up Docker networks..." log_info "Setting up Docker networks..."
# Create frontend network # Create frontend network
if docker network inspect frontend >/dev/null 2>&1; then if docker network inspect apa-frontend >/dev/null 2>&1; then
log_warning "Network 'frontend' already exists" log_warning "Network 'apa-frontend' already exists"
else else
docker network create frontend docker network create apa-frontend
log_success "Created network 'frontend'" log_success "Created network 'apa-frontend'"
fi fi
# Create backend network # Create backend network
if docker network inspect backend >/dev/null 2>&1; then if docker network inspect apa-backend >/dev/null 2>&1; then
log_warning "Network 'backend' already exists" log_warning "Network 'apa-backend' already exists"
else else
docker network create backend docker network create apa-backend
log_success "Created network 'backend'" log_success "Created network 'apa-backend'"
fi fi
log_success "Docker networks ready!" log_success "Docker networks ready!"
echo "" echo ""
log_info "Networks:" log_info "Networks:"
docker network ls | grep -E "frontend|backend" docker network ls | grep -E "apa-frontend|apa-backend"
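
Any stack that attaches to the renamed networks must declare `apa-frontend` and `apa-backend` as external; once services are up, a quick check that containers actually landed on them (network names taken from the script above):

```bash
# List which containers are attached to each renamed network.
docker network inspect -f '{{.Name}}: {{range .Containers}}{{.Name}} {{end}}' apa-frontend
docker network inspect -f '{{.Name}}: {{range .Containers}}{{.Name}} {{end}}' apa-backend
```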

View File

@@ -3,7 +3,7 @@
import asyncio import asyncio
import logging import logging
from libs.events import EventPayload, NATSEventBus, create_event_bus from libs.events import EventPayload, NATSEventBus
# Configure logging # Configure logging
logging.basicConfig(level=logging.INFO) logging.basicConfig(level=logging.INFO)
@@ -20,7 +20,7 @@ async def example_handler(topic: str, payload: EventPayload) -> None:
) )
async def main(): async def main() -> None:
"""Main example function.""" """Main example function."""
# Method 1: Direct instantiation # Method 1: Direct instantiation
nats_bus = NATSEventBus( nats_bus = NATSEventBus(
@@ -67,7 +67,11 @@ async def main():
# Publish an update event # Publish an update event
update_payload = EventPayload( update_payload = EventPayload(
data={"user_id": "user-1", "name": "Updated User 1", "email": "user1@example.com"}, data={
"user_id": "user-1",
"name": "Updated User 1",
"email": "user1@example.com",
},
actor="admin", actor="admin",
tenant_id="tenant-123", tenant_id="tenant-123",
) )
@@ -86,7 +90,7 @@ async def main():
logger.info("NATS event bus stopped") logger.info("NATS event bus stopped")
async def cluster_example(): async def cluster_example() -> None:
"""Example with NATS cluster configuration.""" """Example with NATS cluster configuration."""
# Connect to a NATS cluster # Connect to a NATS cluster
cluster_bus = NATSEventBus( cluster_bus = NATSEventBus(
@@ -117,7 +121,7 @@ async def cluster_example():
await cluster_bus.stop() await cluster_bus.stop()
async def error_handling_example(): async def error_handling_example() -> None:
"""Example showing error handling.""" """Example showing error handling."""
async def failing_handler(topic: str, payload: EventPayload) -> None: async def failing_handler(topic: str, payload: EventPayload) -> None:
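
Running this example needs a JetStream-enabled NATS server; the quickest route is a throwaway container (the official `nats` image accepts `-js` to enable JetStream). The script path below is a guess from the import shown above, so adjust it to the real repo layout:

```bash
# Disposable NATS server with JetStream enabled
docker run -d --name nats-dev -p 4222:4222 nats:2.10 -js
# Run the example against it (path is hypothetical)
python libs/events/examples/nats_example.py
```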

View File

@@ -5,10 +5,11 @@ import json
from collections.abc import Awaitable, Callable from collections.abc import Awaitable, Callable
from typing import Any from typing import Any
import nats # type: ignore import nats
import structlog import structlog
from nats.aio.client import Client as NATS # type: ignore from nats.aio.client import Client as NATS
from nats.js import JetStreamContext # type: ignore from nats.js import JetStreamContext
from nats.js.api import AckPolicy, ConsumerConfig, DeliverPolicy
from .base import EventBus, EventPayload from .base import EventBus, EventPayload
@@ -157,10 +158,10 @@ class NATSEventBus(EventBus): # pylint: disable=too-many-instance-attributes
subscription = await self.js.pull_subscribe( subscription = await self.js.pull_subscribe(
subject=subject, subject=subject,
durable=consumer_name, durable=consumer_name,
config=nats.js.api.ConsumerConfig( config=ConsumerConfig(
durable_name=consumer_name, durable_name=consumer_name,
ack_policy=nats.js.api.AckPolicy.EXPLICIT, ack_policy=AckPolicy.EXPLICIT,
deliver_policy=nats.js.api.DeliverPolicy.NEW, deliver_policy=DeliverPolicy.NEW,
max_deliver=3, max_deliver=3,
ack_wait=30, # 30 seconds ack_wait=30, # 30 seconds
), ),
@@ -193,15 +194,12 @@ class NATSEventBus(EventBus): # pylint: disable=too-many-instance-attributes
await self.js.stream_info(self.stream_name) await self.js.stream_info(self.stream_name)
logger.debug("Stream already exists", stream=self.stream_name) logger.debug("Stream already exists", stream=self.stream_name)
except nats.js.errors.NotFoundError: except Exception:
# Stream doesn't exist, create it # Stream doesn't exist, create it
try: try:
await self.js.add_stream( await self.js.add_stream(
name=self.stream_name, name=self.stream_name,
subjects=[f"{self.stream_name}.*"], subjects=[f"{self.stream_name}.*"],
retention=nats.js.api.RetentionPolicy.WORK_QUEUE,
max_age=7 * 24 * 60 * 60, # 7 days in seconds
storage=nats.js.api.StorageType.FILE,
) )
logger.info("Created JetStream stream", stream=self.stream_name) logger.info("Created JetStream stream", stream=self.stream_name)
@@ -261,7 +259,7 @@ class NATSEventBus(EventBus): # pylint: disable=too-many-instance-attributes
) )
await message.nak() await message.nak()
except asyncio.TimeoutError: except TimeoutError:
# No messages available, continue polling # No messages available, continue polling
continue continue
except Exception as e: # pylint: disable=broad-exception-caught except Exception as e: # pylint: disable=broad-exception-caught
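
After the switch to explicit `ConsumerConfig`/`AckPolicy`/`DeliverPolicy` imports and the simplified stream creation, it is worth confirming what the bus actually created at runtime. A sketch using the `nats` CLI (nats-io/natscli, assumed to be installed), with the stream name taken from the `NATS_STREAM_NAME` default earlier in this commit:

```bash
# Inspect the stream the event bus creates on startup
nats --server nats://localhost:4222 stream info TAX_AGENT_EVENTS
# List the durable pull consumers created per subscribed topic
nats --server nats://localhost:4222 consumer ls TAX_AGENT_EVENTS
```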

View File

@@ -15,3 +15,6 @@ pretty = True
[mypy-tests.*] [mypy-tests.*]
# tests may use fixtures without full annotations, but keep strict overall # tests may use fixtures without full annotations, but keep strict overall
disallow_untyped_defs = False disallow_untyped_defs = False
[mypy-minio.*]
ignore_missing_imports = True

View File

@@ -1,401 +0,0 @@
#!/bin/bash
# Cleanup and align infrastructure structure
# This script consolidates configurations and removes duplication
set -e
# Colors
GREEN='\033[0;32m'
YELLOW='\033[1;33m'
BLUE='\033[0;34m'
RED='\033[0;31m'
NC='\033[0m'
log_info() {
echo -e "${BLUE} $1${NC}"
}
log_success() {
echo -e "${GREEN}$1${NC}"
}
log_warning() {
echo -e "${YELLOW}⚠️ $1${NC}"
}
log_error() {
echo -e "${RED}$1${NC}"
}
# Script directory
SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
PROJECT_ROOT="$(dirname "$SCRIPT_DIR")"
INFRA_DIR="$PROJECT_ROOT/infra"
log_info "Cleaning up infrastructure structure..."
echo " Project Root: $PROJECT_ROOT"
echo " Infra Dir: $INFRA_DIR"
echo ""
# Step 1: Backup current structure
log_info "Step 1: Creating backup..."
BACKUP_DIR="$PROJECT_ROOT/infra-backup-$(date +%Y%m%d_%H%M%S)"
mkdir -p "$BACKUP_DIR"
cp -r "$INFRA_DIR/configs" "$BACKUP_DIR/" 2>/dev/null || true
log_success "Backup created at $BACKUP_DIR"
# Step 2: Align Traefik configurations
log_info "Step 2: Aligning Traefik configurations..."
# The source of truth is infra/compose/traefik/config/
# Remove duplicates from infra/configs/traefik/config/
if [ -d "$INFRA_DIR/configs/traefik/config" ]; then
log_warning " Removing duplicate Traefik configs from infra/configs/traefik/config/"
rm -rf "$INFRA_DIR/configs/traefik/config"
log_success " Removed duplicate Traefik configs"
fi
# Keep only app-specific Traefik middleware in configs
mkdir -p "$INFRA_DIR/configs/traefik"
cat > "$INFRA_DIR/configs/traefik/app-middlewares.yml" << 'EOF'
# Application-specific Traefik middlewares
# These are loaded by the application infrastructure, not the external Traefik
http:
middlewares:
# Large upload middleware for Gitea registry
gitea-large-upload:
buffering:
maxRequestBodyBytes: 5368709120 # 5GB
memRequestBodyBytes: 104857600 # 100MB
maxResponseBodyBytes: 5368709120 # 5GB
memResponseBodyBytes: 104857600 # 100MB
retryExpression: "IsNetworkError() && Attempts() < 3"
# Rate limiting for public APIs
api-ratelimit:
rateLimit:
average: 100
burst: 50
period: 1s
# Security headers
security-headers:
headers:
frameDeny: true
sslRedirect: true
browserXssFilter: true
contentTypeNosniff: true
stsIncludeSubdomains: true
stsPreload: true
stsSeconds: 31536000
EOF
log_success " Created app-specific Traefik middlewares"
# Step 3: Align Authentik configurations
log_info "Step 3: Aligning Authentik configurations..."
# infra/compose/authentik/ - Production service configs
# infra/configs/authentik/ - Application bootstrap configs (keep separate)
if [ -d "$INFRA_DIR/configs/authentik" ]; then
log_info " Keeping app-specific Authentik bootstrap in infra/configs/authentik/"
log_success " Authentik configs aligned"
fi
# Step 4: Clean up old directories
log_info "Step 4: Cleaning up old directories..."
# Remove old standalone config directories that were moved
OLD_DIRS=(
"$INFRA_DIR/traefik"
"$INFRA_DIR/grafana"
"$INFRA_DIR/prometheus"
"$INFRA_DIR/loki"
"$INFRA_DIR/promtail"
"$INFRA_DIR/vault"
"$INFRA_DIR/neo4j"
"$INFRA_DIR/postgres"
)
for dir in "${OLD_DIRS[@]}"; do
if [ -d "$dir" ] && [ -f "$INFRA_DIR/configs/$(basename $dir)/.moved" ]; then
log_warning " Removing old directory: $dir"
rm -rf "$dir"
log_success " Removed $dir"
fi
done
# Step 5: Update .gitignore
log_info "Step 5: Updating .gitignore..."
cat > "$INFRA_DIR/.gitignore" << 'EOF'
# Environment files (contain secrets)
environments/*/.env
!environments/*/.env.example
compose/*/.env
!compose/env.example
# Certificates
certs/*/
!certs/.gitkeep
compose/*/certs/
!compose/*/certs/.gitkeep
# Provider credentials
compose/traefik/.provider.env
configs/traefik/.provider.env
# Data directories
compose/*/data/
compose/*/media/
compose/authentik/media/
compose/authentik/custom-templates/
compose/portainer/portainer/
# Backup files
*.backup
*.tmp
*-backup-*/
# Docker volumes (if mounted locally)
volumes/
# Logs
*.log
logs/
# Moved markers
**/.moved
EOF
log_success ".gitignore updated"
# Step 6: Create README for external services
log_info "Step 6: Creating documentation..."
cat > "$INFRA_DIR/compose/README.md" << 'EOF'
# External Services
This directory contains Docker Compose configurations for external services that run on the production server.
## Services
### Traefik
- **Location**: `traefik/`
- **Purpose**: Reverse proxy and load balancer for all services
- **Deploy**: `cd traefik && docker compose up -d`
- **Access**: https://traefik.harkon.co.uk
### Authentik
- **Location**: `authentik/`
- **Purpose**: SSO and authentication provider
- **Deploy**: `cd authentik && docker compose up -d`
- **Access**: https://authentik.harkon.co.uk
### Gitea
- **Location**: `gitea/`
- **Purpose**: Git repository hosting and container registry
- **Deploy**: `cd gitea && docker compose up -d`
- **Access**: https://gitea.harkon.co.uk
### Nextcloud
- **Location**: `nextcloud/`
- **Purpose**: File storage and collaboration
- **Deploy**: `cd nextcloud && docker compose up -d`
- **Access**: https://nextcloud.harkon.co.uk
### Portainer
- **Location**: `portainer/`
- **Purpose**: Docker management UI
- **Deploy**: `cd portainer && docker compose up -d`
- **Access**: https://portainer.harkon.co.uk
## Deployment
### Production (Remote Server)
```bash
# SSH to server
ssh deploy@141.136.35.199
# Navigate to service directory
cd /opt/ai-tax-agent/infra/compose/<service>
# Deploy service
docker compose up -d
# Check logs
docker compose logs -f
# Check status
docker compose ps
```
### Local Development
For local development, use the all-in-one compose file:
```bash
cd infra/compose
docker compose -f docker-compose.local.yml up -d
```
## Configuration
Each service has its own `.env` file for environment-specific configuration:
- `traefik/.provider.env` - GoDaddy API credentials
- `authentik/.env` - Authentik secrets
- `gitea/.env` - Gitea database credentials
## Networks
All services use shared Docker networks:
- `frontend` - Public-facing services
- `backend` - Internal services
Create networks before deploying:
```bash
docker network create frontend
docker network create backend
```
## Maintenance
### Update Service
```bash
cd /opt/ai-tax-agent/infra/compose/<service>
docker compose pull
docker compose up -d
```
### Restart Service
```bash
cd /opt/ai-tax-agent/infra/compose/<service>
docker compose restart
```
### View Logs
```bash
cd /opt/ai-tax-agent/infra/compose/<service>
docker compose logs -f
```
### Backup Data
```bash
# Backup volumes
docker run --rm -v <service>_data:/data -v $(pwd):/backup alpine tar czf /backup/<service>-backup.tar.gz /data
```
## Integration with Application
These external services are used by the application infrastructure:
- **Traefik** - Routes traffic to application services
- **Authentik** - Provides SSO for application UIs
- **Gitea** - Hosts Docker images for application services
The application infrastructure is deployed separately using:
```bash
./infra/scripts/deploy.sh production infrastructure
./infra/scripts/deploy.sh production services
```
EOF
log_success "Created external services README"
# Step 7: Create deployment helper script
log_info "Step 7: Creating deployment helper script..."
cat > "$SCRIPT_DIR/deploy-external.sh" << 'EOF'
#!/bin/bash
# Deploy external services on production server
# Usage: ./scripts/deploy-external.sh <service>
set -e
SERVICE=$1
if [ -z "$SERVICE" ]; then
echo "Usage: $0 <service>"
echo ""
echo "Available services:"
echo " traefik"
echo " authentik"
echo " gitea"
echo " nextcloud"
echo " portainer"
echo " all"
exit 1
fi
SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
PROJECT_ROOT="$(dirname "$SCRIPT_DIR")"
COMPOSE_DIR="$PROJECT_ROOT/infra/compose"
deploy_service() {
local svc=$1
echo "🚀 Deploying $svc..."
if [ ! -d "$COMPOSE_DIR/$svc" ]; then
echo "❌ Service directory not found: $COMPOSE_DIR/$svc"
return 1
fi
cd "$COMPOSE_DIR/$svc"
docker compose up -d
echo "✅ $svc deployed"
}
if [ "$SERVICE" = "all" ]; then
deploy_service "traefik"
sleep 5
deploy_service "authentik"
sleep 5
deploy_service "gitea"
deploy_service "nextcloud"
deploy_service "portainer"
else
deploy_service "$SERVICE"
fi
echo ""
echo "🎉 Deployment complete!"
EOF
chmod +x "$SCRIPT_DIR/deploy-external.sh"
log_success "Created deploy-external.sh script"
# Step 8: Summary
echo ""
log_success "Cleanup complete!"
echo ""
log_info "Summary of changes:"
echo " ✅ Removed duplicate Traefik configs"
echo " ✅ Created app-specific Traefik middlewares"
echo " ✅ Aligned Authentik configurations"
echo " ✅ Cleaned up old directories"
echo " ✅ Updated .gitignore"
echo " ✅ Created external services README"
echo " ✅ Created deploy-external.sh script"
echo ""
log_info "Backup location: $BACKUP_DIR"
echo ""
log_info "Next steps:"
echo " 1. Review changes in infra/ directory"
echo " 2. Update Makefile with new targets"
echo " 3. Test local deployment: make run"
echo " 4. Test external service deployment: ./scripts/deploy-external.sh traefik"
echo ""

View File

@@ -1,54 +0,0 @@
#!/bin/bash
# Debug script for remote server issues
echo "=== Connecting to remote server ==="
echo "Running diagnostics..."
echo ""
ssh -t deploy@141.136.35.199 << 'ENDSSH'
set -x
echo "=== 1. Check Docker is running ==="
docker --version
docker info | head -10
echo ""
echo "=== 2. Check Docker images ==="
docker images | head -20
echo ""
echo "=== 3. Check if logged in to Gitea ==="
cat ~/.docker/config.json 2>/dev/null || echo "No Docker config found"
echo ""
echo "=== 4. Check Gitea container ==="
docker ps | grep gitea || echo "Gitea not running"
echo ""
echo "=== 5. Check recent Docker logs ==="
docker ps -a --format "{{.Names}}" | head -5
echo ""
echo "=== 6. Test Gitea registry connectivity ==="
curl -I https://gitea.harkon.co.uk/v2/ 2>&1 | head -10
echo ""
echo "=== 7. Check disk space ==="
df -h | grep -E "Filesystem|/$"
echo ""
echo "=== 8. Check if base-ml build is in progress ==="
docker ps | grep build || echo "No build in progress"
echo ""
echo "=== 9. Check Docker build logs (if any) ==="
docker ps -a --filter "ancestor=gitea.harkon.co.uk/harkon/base-ml" --format "{{.ID}} {{.Status}}"
echo ""
echo "=== 10. Try a simple docker login test ==="
echo "Testing registry connectivity..."
curl -v https://gitea.harkon.co.uk/v2/ 2>&1 | grep -E "HTTP|401|200"
ENDSSH

View File

@@ -1,54 +0,0 @@
#!/bin/bash
# Deploy external services on production server
# Usage: ./scripts/deploy-external.sh <service>
set -e
SERVICE=$1
if [ -z "$SERVICE" ]; then
echo "Usage: $0 <service>"
echo ""
echo "Available services:"
echo " traefik"
echo " authentik"
echo " gitea"
echo " nextcloud"
echo " portainer"
echo " all"
exit 1
fi
SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
PROJECT_ROOT="$(dirname "$SCRIPT_DIR")"
COMPOSE_DIR="$PROJECT_ROOT/infra/compose"
deploy_service() {
local svc=$1
echo "🚀 Deploying $svc..."
if [ ! -d "$COMPOSE_DIR/$svc" ]; then
echo "❌ Service directory not found: $COMPOSE_DIR/$svc"
return 1
fi
cd "$COMPOSE_DIR/$svc"
docker compose up -d
echo "$svc deployed"
}
if [ "$SERVICE" = "all" ]; then
deploy_service "traefik"
sleep 5
deploy_service "authentik"
sleep 5
deploy_service "gitea"
deploy_service "nextcloud"
deploy_service "portainer"
else
deploy_service "$SERVICE"
fi
echo ""
echo "🎉 Deployment complete!"

View File

@@ -216,7 +216,7 @@ verify_deployment() {
echo "" echo ""
echo "=== Docker Networks ===" echo "=== Docker Networks ==="
docker network ls | grep -E "frontend|backend" docker network ls | grep -E "apa-frontend|apa-backend"
echo "" echo ""
echo "=== Disk Usage ===" echo "=== Disk Usage ==="
@@ -290,7 +290,7 @@ case "${1:-all}" in
verify_deployment verify_deployment
;; ;;
logs) logs)
show_logs "${2:-svc-ingestion}" show_logs "${2:-apa-svc-ingestion}"
;; ;;
all) all)
deploy_all deploy_all
@@ -310,4 +310,3 @@ case "${1:-all}" in
exit 1 exit 1
;; ;;
esac esac
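
With the prefix change, the log helper now defaults to `apa-svc-ingestion`. Typical invocations look like the following; the script's filename is not shown in this hunk, so the path is an assumption:

```bash
# Verify networks, containers and disk usage after a deploy
./infra/scripts/deploy-production.sh verify
# Tail logs for a specific prefixed service (defaults to apa-svc-ingestion when omitted)
./infra/scripts/deploy-production.sh logs apa-svc-coverage
```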

View File

@@ -20,7 +20,7 @@ echo "🔐 Generating development certificates..."
# Step 3: Start core infrastructure first # Step 3: Start core infrastructure first
echo "🏗️ Starting core infrastructure..." echo "🏗️ Starting core infrastructure..."
cd infra/compose cd infra/compose
docker compose -f docker-compose.local.yml up -d traefik postgres redis docker compose -f docker-compose.local.yml up -d ata-traefik ata-postgres ata-redis
cd ../.. cd ../..
# Step 4: Wait for core services and fix database issues # Step 4: Wait for core services and fix database issues
@@ -31,28 +31,28 @@ sleep 15
# Step 5: Start Authentik components in order # Step 5: Start Authentik components in order
echo "🔐 Starting Authentik components..." echo "🔐 Starting Authentik components..."
cd infra/compose cd infra/compose
docker compose -f docker-compose.local.yml up -d authentik-db authentik-redis docker compose -f docker-compose.local.yml up -d ata-authentik-db ata-authentik-redis
sleep 10 sleep 10
docker compose -f docker-compose.local.yml up -d authentik-server docker compose -f docker-compose.local.yml up -d ata-authentik-server
sleep 15 sleep 15
docker compose -f docker-compose.local.yml up -d authentik-worker authentik-outpost docker compose -f docker-compose.local.yml up -d ata-authentik-worker ata-authentik-outpost
cd ../.. cd ../..
# Step 6: Start remaining infrastructure # Step 6: Start remaining infrastructure
echo "🏗️ Starting remaining infrastructure..." echo "🏗️ Starting remaining infrastructure..."
cd infra/compose cd infra/compose
docker compose -f docker-compose.local.yml up -d vault neo4j qdrant minio prometheus grafana loki docker compose -f docker-compose.local.yml up -d ata-vault ata-neo4j ata-qdrant ata-minio ata-prometheus ata-grafana ata-loki
cd ../.. cd ../..
# Step 7: Wait and verify Authentik is healthy # Step 7: Wait and verify Authentik is healthy
echo "⏳ Waiting for Authentik to be healthy..." echo "⏳ Waiting for Authentik to be healthy..."
timeout=120 timeout=120
counter=0 counter=0
while [ "$(docker inspect --format='{{.State.Health.Status}}' authentik-server 2>/dev/null)" != "healthy" ]; do while [ "$(docker inspect --format='{{.State.Health.Status}}' ata-authentik-server 2>/dev/null)" != "healthy" ]; do
if [ $counter -ge $timeout ]; then if [ $counter -ge $timeout ]; then
echo "❌ Authentik server failed to become healthy within $timeout seconds" echo "❌ Authentik server failed to become healthy within $timeout seconds"
echo "📋 Checking logs..." echo "📋 Checking logs..."
docker compose -f infra/compose/docker-compose.local.yml logs --tail=10 authentik-server docker compose -f infra/compose/docker-compose.local.yml logs --tail=10 ata-authentik-server
exit 1 exit 1
fi fi
sleep 2 sleep 2
@@ -65,15 +65,15 @@ echo "✅ Authentik is healthy"
echo "🚀 Starting application services..." echo "🚀 Starting application services..."
cd infra/compose cd infra/compose
docker compose -f docker-compose.local.yml up -d \ docker compose -f docker-compose.local.yml up -d \
svc-ingestion svc-extract svc-forms svc-hmrc svc-kg \ ata-svc-ingestion ata-svc-extract ata-svc-forms ata-svc-hmrc ata-svc-kg \
svc-normalize-map svc-ocr svc-rag-indexer svc-rag-retriever \ ata-svc-normalize-map ata-svc-ocr ata-svc-rag-indexer ata-svc-rag-retriever \
svc-reason svc-rpa svc-firm-connectors svc-coverage ui-review ata-svc-reason ata-svc-rpa ata-svc-firm-connectors ata-svc-coverage ata-ui-review
cd ../.. cd ../..
# Step 9: Start Unleash (may fail, but that's OK) # Step 9: Start Unleash (may fail, but that's OK)
echo "📊 Starting Unleash (may require manual configuration)..." echo "📊 Starting Unleash (may require manual configuration)..."
cd infra/compose cd infra/compose
docker compose -f docker-compose.local.yml up -d unleash || echo "⚠️ Unleash failed to start - may need manual token configuration" docker compose -f docker-compose.local.yml up -d ata-unleash || echo "⚠️ Unleash failed to start - may need manual token configuration"
cd ../.. cd ../..
# Step 10: Final verification # Step 10: Final verification

View File

@@ -33,8 +33,8 @@ bash "$ROOT_DIR/scripts/generate-dev-certs.sh"
# 4) Bring up core infra (detached)
echo "🏗️ Starting Traefik + core infra..."
docker compose -f "$COMPOSE_DIR/docker-compose.local.yml" up -d \
-traefik authentik-db authentik-redis authentik-server authentik-worker \
-vault postgres neo4j qdrant minio redis prometheus grafana loki
+ata-traefik ata-authentik-db ata-authentik-redis ata-authentik-server ata-authentik-worker \
+ata-vault ata-postgres ata-neo4j ata-qdrant ata-minio ata-redis ata-prometheus ata-grafana ata-loki
# 5) Wait for Traefik, then Authentik (initial-setup or login)
echo "⏳ Waiting for Traefik to respond..."
@@ -77,7 +77,7 @@ fi
# 7) Start Authentik outpost if token present
if [[ -n "${AUTHENTIK_OUTPOST_TOKEN:-}" && "${AUTHENTIK_OUTPOST_TOKEN}" != "changeme" ]]; then
echo "🔐 Starting Authentik outpost..."
-docker compose -f "$COMPOSE_DIR/docker-compose.local.yml" up -d authentik-outpost || true
+docker compose -f "$COMPOSE_DIR/docker-compose.local.yml" up -d ata-authentik-outpost || true
else
echo " Set AUTHENTIK_OUTPOST_TOKEN in $COMPOSE_DIR/.env to start authentik-outpost"
fi
@@ -86,9 +86,9 @@ fi
if [[ "${START_APP_SERVICES:-true}" == "true" ]]; then
echo "🚀 Starting application services..."
docker compose -f "$COMPOSE_DIR/docker-compose.local.yml" up -d \
-svc-ingestion svc-extract svc-kg svc-rag-retriever svc-coverage \
-svc-firm-connectors svc-forms svc-hmrc svc-normalize-map svc-ocr \
-svc-rag-indexer svc-reason svc-rpa ui-review unleash || true
+ata-svc-ingestion ata-svc-extract ata-svc-kg ata-svc-rag-retriever ata-svc-coverage \
+ata-svc-firm-connectors ata-svc-forms ata-svc-hmrc ata-svc-normalize-map ata-svc-ocr \
+ata-svc-rag-indexer ata-svc-reason ata-svc-rpa ata-ui-review ata-unleash || true
fi
echo "🎉 Dev environment is up"
@@ -1,152 +0,0 @@
#!/bin/bash
# Enable Gitea Container Registry
# This script configures Gitea to support Docker container registry
set -e
REMOTE_HOST="deploy@141.136.35.199"
GITEA_PATH="/opt/compose/gitea"
echo "🔧 Enabling Gitea Container Registry..."
# Step 1: Add packages configuration to Gitea
echo "📝 Step 1: Configuring Gitea packages..."
ssh $REMOTE_HOST << 'EOF'
# Create custom configuration directory if it doesn't exist
sudo mkdir -p /opt/compose/gitea/custom/conf
# Create or update custom app.ini with packages enabled
sudo tee /opt/compose/gitea/custom/conf/app.ini > /dev/null << 'GITEA_CONFIG'
[packages]
ENABLED = true
CHUNKED_UPLOAD_PATH = /data/gitea/tmp/package-upload
[packages.container]
ENABLED = true
GITEA_CONFIG
echo "✅ Gitea configuration created"
EOF
# Step 2: Update Gitea compose file to mount custom config and add registry labels
echo "📝 Step 2: Updating Gitea compose file..."
ssh $REMOTE_HOST << 'EOF'
cd /opt/compose/gitea
# Backup current compose file
sudo cp compose.yaml compose.yaml.backup
# Create updated compose file with registry support
sudo tee compose.yaml > /dev/null << 'COMPOSE_FILE'
---
services:
server:
image: docker.io/gitea/gitea:1.24.5
container_name: gitea-server
env_file:
- ./.env
environment:
- USER_UID=1000
- USER_GID=1000
- GITEA__database__DB_TYPE=postgres
- GITEA__database__HOST=${POSTGRES_HOST:-db}:${POSTGRES_PORT:-5432}
- GITEA__database__NAME=${POSTGRES_DB:-gitea}
- GITEA__database__USER=${POSTGRES_USER:-gitea}
- GITEA__database__PASSWD=${POSTGRES_PASSWORD:?POSTGRES_PASSWORD not set}
- GITEA__server__SSH_PORT=2221
- GITEA__server__ROOT_URL=https://gitea.harkon.co.uk
- GITEA__packages__ENABLED=true
- GITEA__packages__CHUNKED_UPLOAD_PATH=/data/gitea/tmp/package-upload
networks:
- frontend
- backend
volumes:
- gitea-data:/data
- ./custom/conf/app.ini:/data/gitea/conf/app.ini.custom:ro
- /etc/timezone:/etc/timezone:ro
- /etc/localtime:/etc/localtime:ro
ports:
- "2221:22"
depends_on:
- db
labels:
# Main Gitea web interface
- traefik.enable=true
- traefik.http.services.gitea.loadbalancer.server.port=3000
- traefik.http.services.gitea.loadbalancer.server.scheme=http
- traefik.http.routers.gitea-https.entrypoints=websecure
- traefik.http.routers.gitea-https.rule=Host(`gitea.harkon.co.uk`)
- traefik.http.routers.gitea-https.tls=true
- traefik.http.routers.gitea-https.tls.certresolver=godaddy
- traefik.http.routers.gitea-https.service=gitea
# Container Registry (same port, different subdomain)
- traefik.http.routers.gitea-registry.entrypoints=websecure
- traefik.http.routers.gitea-registry.rule=Host(`registry.harkon.co.uk`)
- traefik.http.routers.gitea-registry.tls=true
- traefik.http.routers.gitea-registry.tls.certresolver=godaddy
- traefik.http.routers.gitea-registry.service=gitea
restart: unless-stopped
db:
image: docker.io/library/postgres:17.5
container_name: gitea-db
environment:
- POSTGRES_USER=${POSTGRES_USER:-gitea}
- POSTGRES_PASSWORD=${POSTGRES_PASSWORD:?POSTGRES_PASSWORD not set}
- POSTGRES_DB=${POSTGRES_DB:-gitea}
networks:
- backend
volumes:
- gitea-db:/var/lib/postgresql/data
restart: unless-stopped
volumes:
gitea-data:
driver: local
gitea-db:
driver: local
networks:
frontend:
external: true
backend:
external: true
COMPOSE_FILE
echo "✅ Gitea compose file updated"
EOF
# Step 3: Restart Gitea to apply changes
echo "📝 Step 3: Restarting Gitea..."
ssh $REMOTE_HOST << 'EOF'
cd /opt/compose/gitea
docker compose down
docker compose up -d
echo "⏳ Waiting for Gitea to start..."
sleep 15
echo "✅ Gitea restarted"
EOF
echo ""
echo "✅ Gitea Container Registry enabled successfully!"
echo ""
echo "📋 Next steps:"
echo "1. Verify DNS: dig registry.harkon.co.uk (should point to 141.136.35.199)"
echo "2. Wait for SSL certificate (Traefik will auto-generate)"
echo "3. Create Gitea access token:"
echo " - Login to https://gitea.harkon.co.uk"
echo " - Settings → Applications → Generate New Token"
echo " - Select scope: write:package"
echo "4. Login to registry:"
echo " docker login registry.harkon.co.uk"
echo " Username: <your-gitea-username>"
echo " Password: <your-access-token>"
echo ""
echo "🔍 Check Gitea logs:"
echo " ssh deploy@141.136.35.199 'docker logs gitea-server'"
@@ -11,7 +11,7 @@ echo "🔧 Fixing database issues..."
echo "⏳ Waiting for PostgreSQL to be ready..."
timeout=60
counter=0
-while ! docker exec postgres pg_isready -U postgres >/dev/null 2>&1; do
+while ! docker exec ata-postgres pg_isready -U postgres >/dev/null 2>&1; do
if [ $counter -ge $timeout ]; then
echo "❌ PostgreSQL failed to start within $timeout seconds"
exit 1
@@ -23,14 +23,14 @@ echo "✅ PostgreSQL is ready"
# Create unleash database if it doesn't exist
echo "📊 Creating unleash database if needed..."
-docker exec postgres psql -U postgres -tc "SELECT 1 FROM pg_database WHERE datname = 'unleash'" | grep -q 1 || \
-docker exec postgres psql -U postgres -c "CREATE DATABASE unleash;"
+docker exec ata-postgres psql -U postgres -tc "SELECT 1 FROM pg_database WHERE datname = 'unleash'" | grep -q 1 || \
+docker exec ata-postgres psql -U postgres -c "CREATE DATABASE unleash;"
echo "✅ Unleash database ready"
# Create tax_system database for Authentik if needed
echo "🔐 Creating tax_system database for Authentik if needed..."
-docker exec postgres psql -U postgres -tc "SELECT 1 FROM pg_database WHERE datname = 'tax_system'" | grep -q 1 || \
-docker exec postgres psql -U postgres -c "CREATE DATABASE tax_system;"
+docker exec ata-postgres psql -U postgres -tc "SELECT 1 FROM pg_database WHERE datname = 'tax_system'" | grep -q 1 || \
+docker exec ata-postgres psql -U postgres -c "CREATE DATABASE tax_system;"
echo "✅ Authentik database ready"
echo "🎉 Database issues fixed!"
@@ -1,152 +0,0 @@
#!/bin/bash
# Script to fix Gitea upload size limits for large Docker images
# Run this on the remote server: ssh deploy@141.136.35.199
set -e
echo "=== Gitea Registry Upload Limit Fix ==="
echo ""
# Colors
RED='\033[0;31m'
GREEN='\033[0;32m'
YELLOW='\033[1;33m'
NC='\033[0m' # No Color
# Step 1: Check if Gitea is running
echo -e "${YELLOW}Step 1: Checking Gitea status...${NC}"
if docker ps | grep -q gitea-server; then
echo -e "${GREEN}✓ Gitea is running${NC}"
GITEA_CONTAINER=$(docker ps --filter "name=gitea" --format "{{.Names}}" | head -1)
echo " Container: $GITEA_CONTAINER"
else
echo -e "${RED}✗ Gitea is not running!${NC}"
exit 1
fi
# Step 2: Check if Traefik is running
echo -e "\n${YELLOW}Step 2: Checking Traefik status...${NC}"
if docker ps | grep -q traefik; then
echo -e "${GREEN}✓ Traefik is running${NC}"
TRAEFIK_CONTAINER=$(docker ps --filter "name=traefik" --format "{{.Names}}" | head -1)
echo " Container: $TRAEFIK_CONTAINER"
HAS_TRAEFIK=true
else
echo -e "${YELLOW}⚠ Traefik is not running (may not be needed)${NC}"
HAS_TRAEFIK=false
fi
# Step 3: Find Traefik config directory
if [ "$HAS_TRAEFIK" = true ]; then
echo -e "\n${YELLOW}Step 3: Finding Traefik configuration...${NC}"
# Try to find Traefik config mount
TRAEFIK_CONFIG=$(docker inspect $TRAEFIK_CONTAINER | grep -A 1 '"Destination": "/etc/traefik"' | grep Source | cut -d'"' -f4 || echo "")
if [ -z "$TRAEFIK_CONFIG" ]; then
TRAEFIK_CONFIG="/opt/traefik/config"
echo -e "${YELLOW} Using default: $TRAEFIK_CONFIG${NC}"
else
echo -e "${GREEN} Found: $TRAEFIK_CONFIG${NC}"
fi
# Create config directory if it doesn't exist
sudo mkdir -p "$TRAEFIK_CONFIG"
# Step 4: Create Traefik middleware for large uploads
echo -e "\n${YELLOW}Step 4: Creating Traefik middleware...${NC}"
sudo tee "$TRAEFIK_CONFIG/gitea-large-upload.yml" > /dev/null << 'EOF'
http:
middlewares:
gitea-large-upload:
buffering:
maxRequestBodyBytes: 5368709120 # 5GB
memRequestBodyBytes: 104857600 # 100MB in memory
maxResponseBodyBytes: 5368709120 # 5GB
memResponseBodyBytes: 104857600 # 100MB in memory
retryExpression: "IsNetworkError() && Attempts() < 3"
EOF
echo -e "${GREEN}✓ Created $TRAEFIK_CONFIG/gitea-large-upload.yml${NC}"
# Step 5: Restart Traefik
echo -e "\n${YELLOW}Step 5: Restarting Traefik...${NC}"
docker restart $TRAEFIK_CONTAINER
sleep 3
echo -e "${GREEN}✓ Traefik restarted${NC}"
fi
# Step 6: Update Gitea configuration
echo -e "\n${YELLOW}Step 6: Updating Gitea configuration...${NC}"
# Backup current config
docker exec $GITEA_CONTAINER cp /data/gitea/conf/app.ini /data/gitea/conf/app.ini.backup
echo -e "${GREEN}✓ Backed up app.ini${NC}"
# Check if settings already exist
if docker exec $GITEA_CONTAINER grep -q "LFS_MAX_FILE_SIZE" /data/gitea/conf/app.ini; then
echo -e "${YELLOW} LFS_MAX_FILE_SIZE already configured${NC}"
else
# Add LFS_MAX_FILE_SIZE to [server] section
docker exec $GITEA_CONTAINER sh -c 'echo "LFS_MAX_FILE_SIZE = 5368709120" >> /data/gitea/conf/app.ini'
echo -e "${GREEN}✓ Added LFS_MAX_FILE_SIZE${NC}"
fi
# Check if packages section exists
if docker exec $GITEA_CONTAINER grep -q "\[packages\]" /data/gitea/conf/app.ini; then
echo -e "${YELLOW} [packages] section already exists${NC}"
else
# Add packages section
docker exec $GITEA_CONTAINER sh -c 'cat >> /data/gitea/conf/app.ini << EOF
[packages]
ENABLED = true
CHUNKED_UPLOAD_PATH = /data/gitea/tmp/package-upload
EOF'
echo -e "${GREEN}✓ Added [packages] section${NC}"
fi
# Step 7: Restart Gitea
echo -e "\n${YELLOW}Step 7: Restarting Gitea...${NC}"
docker restart $GITEA_CONTAINER
sleep 5
echo -e "${GREEN}✓ Gitea restarted${NC}"
# Step 8: Test registry endpoint
echo -e "\n${YELLOW}Step 8: Testing registry endpoint...${NC}"
RESPONSE=$(curl -s -o /dev/null -w "%{http_code}" https://gitea.harkon.co.uk/v2/)
if [ "$RESPONSE" = "401" ] || [ "$RESPONSE" = "200" ]; then
echo -e "${GREEN}✓ Registry is accessible (HTTP $RESPONSE)${NC}"
else
echo -e "${RED}✗ Registry returned HTTP $RESPONSE${NC}"
fi
# Step 9: Summary
echo -e "\n${GREEN}=== Configuration Complete ===${NC}"
echo ""
echo "Next steps:"
echo "1. Log in to Gitea registry:"
echo " docker login gitea.harkon.co.uk"
echo ""
echo "2. Test with a small image:"
echo " docker pull alpine:latest"
echo " docker tag alpine:latest gitea.harkon.co.uk/harkon/test:latest"
echo " docker push gitea.harkon.co.uk/harkon/test:latest"
echo ""
echo "3. If successful, build and push base-ml:"
echo " cd /home/deploy/ai-tax-agent"
echo " docker build -f infra/docker/base-ml.Dockerfile -t gitea.harkon.co.uk/harkon/base-ml:v1.0.1 ."
echo " docker push gitea.harkon.co.uk/harkon/base-ml:v1.0.1"
echo ""
if [ "$HAS_TRAEFIK" = true ]; then
echo -e "${YELLOW}⚠ IMPORTANT: You need to add this label to your Gitea container:${NC}"
echo " traefik.http.routers.gitea.middlewares=gitea-large-upload@file"
echo ""
echo " Add it to your Gitea docker-compose.yml and restart:"
echo " docker-compose up -d gitea"
fi
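For reference, that label sits alongside the existing Traefik labels on the Gitea service in its compose file. The router name below is an assumption: the compose file earlier in this commit defines the router as gitea-https, while the message above calls it gitea, so the label must target whichever router actually serves the registry traffic:

labels:
  # Attach the buffering middleware so large image layers are not rejected
  - traefik.http.routers.gitea-https.middlewares=gitea-large-upload@file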
@@ -62,5 +62,4 @@ ping -c 3 gitea.harkon.co.uk
# 15. Check if Traefik is running and configured
echo -e "\n=== Traefik Status ==="
-docker ps | grep traefik
+docker ps | grep apa-traefik || docker ps | grep traefik
@@ -125,7 +125,7 @@ echo -e "${BLUE}Step 6: Verifying deployment${NC}"
echo "----------------------------" echo "----------------------------"
# Check running containers # Check running containers
ssh ${REMOTE_USER}@${REMOTE_HOST} "docker ps --format 'table {{.Names}}\t{{.Status}}' | grep -E '(vault|minio|postgres|svc-)'" ssh ${REMOTE_USER}@${REMOTE_HOST} "docker ps --format 'table {{.Names}}\t{{.Status}}' | grep -E '(apa-vault|apa-minio|apa-postgres|apa-svc-)'"
echo "" echo ""
echo -e "${GREEN}========================================${NC}" echo -e "${GREEN}========================================${NC}"
@@ -137,4 +137,3 @@ echo "1. Verify services are running: ./scripts/verify-deployment.sh"
echo "2. Check application: https://app.harkon.co.uk" echo "2. Check application: https://app.harkon.co.uk"
echo "3. Review logs if needed: ssh ${REMOTE_USER}@${REMOTE_HOST} 'docker logs <container>'" echo "3. Review logs if needed: ssh ${REMOTE_USER}@${REMOTE_HOST} 'docker logs <container>'"
echo "" echo ""
@@ -58,14 +58,14 @@ echo -e "${YELLOW}1. Checking Infrastructure Services${NC}"
echo "-----------------------------------" echo "-----------------------------------"
# Check containers on remote server # Check containers on remote server
ssh ${REMOTE_USER}@${REMOTE_HOST} "docker ps --format 'table {{.Names}}\t{{.Status}}\t{{.Ports}}'" | grep -E "(vault|minio|postgres|redis|neo4j|qdrant|nats)" || true ssh ${REMOTE_USER}@${REMOTE_HOST} "docker ps --format 'table {{.Names}}\t{{.Status}}\t{{.Ports}}'" | grep -E "(apa-vault|apa-minio|apa-postgres|apa-redis|apa-neo4j|apa-qdrant|apa-nats)" || true
echo "" echo ""
echo -e "${YELLOW}2. Checking Infrastructure Endpoints${NC}" echo -e "${YELLOW}2. Checking Infrastructure Endpoints${NC}"
echo "------------------------------------" echo "------------------------------------"
check_service "Vault" "https://vault.${DOMAIN}/v1/sys/health" || true check_service "Vault" "https://vault.${DOMAIN}/v1/sys/health" || true
check_service "MinIO Console" "https://minio-console.${DOMAIN}" || true check_service "MinIO Console" "https://minio.${DOMAIN}" || true
check_service "Neo4j Browser" "https://neo4j.${DOMAIN}" || true check_service "Neo4j Browser" "https://neo4j.${DOMAIN}" || true
check_service "Qdrant" "https://qdrant.${DOMAIN}" || true check_service "Qdrant" "https://qdrant.${DOMAIN}" || true
@@ -74,7 +74,7 @@ echo -e "${YELLOW}3. Checking Application Services${NC}"
echo "--------------------------------" echo "--------------------------------"
# Check application containers # Check application containers
ssh ${REMOTE_USER}@${REMOTE_HOST} "docker ps --format 'table {{.Names}}\t{{.Status}}'" | grep -E "svc-" || true ssh ${REMOTE_USER}@${REMOTE_HOST} "docker ps --format 'table {{.Names}}\t{{.Status}}'" | grep -E "apa-svc-" || true
echo "" echo ""
echo -e "${YELLOW}4. Checking Application Endpoints${NC}" echo -e "${YELLOW}4. Checking Application Endpoints${NC}"
@@ -116,7 +116,7 @@ echo ""
echo -e "${YELLOW}6. Checking Docker Networks${NC}" echo -e "${YELLOW}6. Checking Docker Networks${NC}"
echo "--------------------------" echo "--------------------------"
ssh ${REMOTE_USER}@${REMOTE_HOST} "docker network ls | grep -E '(frontend|backend)'" || true ssh ${REMOTE_USER}@${REMOTE_HOST} "docker network ls | grep -E '(apa-frontend|apa-backend)'" || true
echo "" echo ""
echo -e "${YELLOW}7. Checking Disk Usage${NC}" echo -e "${YELLOW}7. Checking Disk Usage${NC}"
@@ -135,7 +135,7 @@ echo -e "${YELLOW}9. Recent Container Logs (Last 10 lines)${NC}"
echo "---------------------------------------" echo "---------------------------------------"
# Get logs from a few key services # Get logs from a few key services
for container in vault minio postgres svc-ingestion svc-extract; do for container in apa-vault apa-minio apa-postgres apa-svc-ingestion apa-svc-extract; do
echo -e "\n${BLUE}=== $container ===${NC}" echo -e "\n${BLUE}=== $container ===${NC}"
ssh ${REMOTE_USER}@${REMOTE_HOST} "docker logs $container --tail 10 2>&1" || echo "Container not found" ssh ${REMOTE_USER}@${REMOTE_HOST} "docker logs $container --tail 10 2>&1" || echo "Container not found"
done done
@@ -151,4 +151,3 @@ echo "2. Review logs for errors: ssh ${REMOTE_USER}@${REMOTE_HOST} 'docker logs
echo "3. Access Grafana: https://grafana.${DOMAIN}" echo "3. Access Grafana: https://grafana.${DOMAIN}"
echo "4. Access Application: https://app.${DOMAIN}" echo "4. Access Application: https://app.${DOMAIN}"
echo "" echo ""