diff --git a/.env.example b/.env.example index c05e3e17a..cdc60b5fe 100644 --- a/.env.example +++ b/.env.example @@ -25,7 +25,7 @@ API_BASE_URL=http://localhost:8000 CRON_INTERVAL_MINUTES=5 # Postgres -POSTGRES_SERVER=localhost +POSTGRES_SERVER=db POSTGRES_PORT=5432 POSTGRES_DB=kaapi POSTGRES_USER=postgres diff --git a/.github/workflows/cd-production.yml b/.github/workflows/cd-production.yml index c6febe073..838902cba 100644 --- a/.github/workflows/cd-production.yml +++ b/.github/workflows/cd-production.yml @@ -36,8 +36,47 @@ jobs: REPOSITORY: ${{ vars.AWS_RESOURCE_PREFIX }}-repo TAG: ${{ github.ref_name }} run: | - docker build -t $REGISTRY/$REPOSITORY:latest ./backend + docker build \ + -t $REGISTRY/$REPOSITORY:latest \ + -t $REGISTRY/$REPOSITORY:$TAG \ + ./backend docker push $REGISTRY/$REPOSITORY:latest + docker push $REGISTRY/$REPOSITORY:$TAG + + - name: Run database migrations + env: + CLUSTER: ${{ vars.AWS_RESOURCE_PREFIX }}-cluster + SERVICE: ${{ vars.AWS_RESOURCE_PREFIX }}-service + TASK_DEF: ${{ vars.AWS_RESOURCE_PREFIX }}-task + run: | + CONTAINER_NAME=$(aws ecs describe-task-definition --task-definition $TASK_DEF --query 'taskDefinition.containerDefinitions[0].name' --output text) + + NETWORK_CONFIG=$(aws ecs describe-services --cluster $CLUSTER --services $SERVICE \ + --query 'services[0].networkConfiguration' --output json | jq -c '.') + + TASK_ARN=$(aws ecs run-task \ + --cluster $CLUSTER \ + --launch-type FARGATE \ + --task-definition $TASK_DEF \ + --count 1 \ + --network-configuration "$NETWORK_CONFIG" \ + --overrides "{\"containerOverrides\":[{\"name\":\"$CONTAINER_NAME\",\"command\":[\"uv\",\"run\",\"alembic\",\"upgrade\",\"head\"]}]}" \ + --query 'tasks[0].taskArn' --output text) + + echo "Migration task started: $TASK_ARN" + aws ecs wait tasks-stopped --cluster $CLUSTER --tasks "$TASK_ARN" + + EXIT_CODE=$(aws ecs describe-tasks --cluster $CLUSTER --tasks "$TASK_ARN" \ + --query "tasks[0].containers[?name=='$CONTAINER_NAME'].exitCode | [0]" \
--output text) + + if [ "$EXIT_CODE" != "0" ]; then + echo "Migration failed with exit code $EXIT_CODE" + aws ecs describe-tasks --cluster $CLUSTER --tasks "$TASK_ARN" \ + --query "tasks[0].{stopCode:stopCode,stoppedReason:stoppedReason,containerReason:containers[?name=='$CONTAINER_NAME'].reason | [0]}" \ + --output json + exit 1 + fi + echo "Migration completed successfully" - name: Deploy to ECS run: | diff --git a/.github/workflows/cd-staging-ec2.yml b/.github/workflows/cd-staging-ec2.yml new file mode 100644 index 000000000..0cd318f71 --- /dev/null +++ b/.github/workflows/cd-staging-ec2.yml @@ -0,0 +1,59 @@ +name: Deploy Kaapi staging to EC2 + +on: + push: + branches: [main] + workflow_dispatch: + +concurrency: + group: staging-ec2-deploy + cancel-in-progress: false + +jobs: + deploy: + runs-on: ubuntu-latest + environment: AWS_STAGING_ENV + + permissions: + id-token: write + contents: read + + steps: + - name: Configure AWS credentials + uses: aws-actions/configure-aws-credentials@v6 + with: + role-to-assume: ${{ secrets.STAGING_EC2_DEPLOY_ROLE_ARN }} + aws-region: ap-south-1 + + - name: Trigger deploy on EC2 via SSM + id: ssm + env: + INSTANCE_ID: ${{ secrets.STAGING_EC2_INSTANCE_ID }} + run: | + CMD_ID=$(aws ssm send-command \ + --instance-ids "$INSTANCE_ID" \ + --document-name "AWS-RunShellScript" \ + --comment "Deploy kaapi-backend kaapi-staging" \ + --parameters commands='["set -eux","chown -R ubuntu:ubuntu /data/kaapi-backend","sudo -iu ubuntu bash -lc \"cd /data/kaapi-backend && git fetch --all && git pull origin main && docker compose -f docker-compose.staging.yml build && docker compose -f docker-compose.staging.yml --profile migrate run --rm migrate && docker compose -f docker-compose.staging.yml up -d --remove-orphans && docker image prune -f\""]' \ + --cloud-watch-output-config CloudWatchOutputEnabled=true \ + --query "Command.CommandId" --output text) + echo "cmd_id=$CMD_ID" >> "$GITHUB_OUTPUT" + echo "Sent SSM command: $CMD_ID" + + - name: Wait 
for SSM command to finish + env: + INSTANCE_ID: ${{ secrets.STAGING_EC2_INSTANCE_ID }} + CMD_ID: ${{ steps.ssm.outputs.cmd_id }} + run: | + WAIT_EXIT=0 + aws ssm wait command-executed \ + --command-id "$CMD_ID" \ + --instance-id "$INSTANCE_ID" || WAIT_EXIT=$? + + aws ssm get-command-invocation \ + --command-id "$CMD_ID" \ + --instance-id "$INSTANCE_ID" \ + --query '{Status:Status,Stdout:StandardOutputContent,Stderr:StandardErrorContent}' \ + --output json + + exit $WAIT_EXIT diff --git a/.github/workflows/cd-staging.yml b/.github/workflows/cd-staging-ecs.yml similarity index 100% rename from .github/workflows/cd-staging.yml rename to .github/workflows/cd-staging-ecs.yml diff --git a/docker-compose.staging.yml b/docker-compose.staging.yml new file mode 100644 index 000000000..36b5fa8ee --- /dev/null +++ b/docker-compose.staging.yml @@ -0,0 +1,67 @@ +version: "3.9" + +services: + backend: + image: "${DOCKER_IMAGE_BACKEND?Variable not set}:${TAG:-latest}" + container_name: backend + restart: always + build: + context: ./backend + env_file: + - .env + ports: + - "8000:80" + healthcheck: + test: ["CMD", "curl", "-f", "http://localhost:80/api/v1/utils/health/"] + interval: 10s + timeout: 5s + retries: 5 + command: > + uv run uvicorn app.main:app --host 0.0.0.0 --port 80 --reload + develop: + watch: + # Sync backend source code into container immediately on change + - action: sync + path: ./backend/app + target: /app/app + # Rebuild image if dependencies change + - action: rebuild + path: ./backend/pyproject.toml + - action: rebuild + path: ./backend/uv.lock + logging: + driver: awslogs + options: + awslogs-region: ap-south-1 + awslogs-group: /ec2/kaapi-staging + awslogs-stream: backend + awslogs-create-group: "true" + + migrate: + image: "${DOCKER_IMAGE_BACKEND?Variable not set}:${TAG:-latest}" + build: + context: ./backend + env_file: + - .env + profiles: ["migrate"] + command: ["uv", "run", "alembic", "upgrade", "head"] + + celery_worker: + image: 
"${DOCKER_IMAGE_BACKEND?Variable not set}:${TAG:-latest}" + container_name: celery-worker + restart: always + build: + context: ./backend + depends_on: + backend: + condition: service_healthy + env_file: + - .env + command: ["uv", "run", "celery", "-A", "app.celery.celery_app", "worker", "--loglevel=info"] + logging: + driver: awslogs + options: + awslogs-region: ap-south-1 + awslogs-group: /ec2/kaapi-staging + awslogs-stream: celery-worker + awslogs-create-group: "true"