Skip to content
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension


Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
2 changes: 1 addition & 1 deletion .env.example
Original file line number Diff line number Diff line change
Expand Up @@ -25,7 +25,7 @@ API_BASE_URL=http://localhost:8000
CRON_INTERVAL_MINUTES=5

# Postgres
POSTGRES_SERVER=localhost
POSTGRES_SERVER=db
POSTGRES_PORT=5432
POSTGRES_DB=kaapi
POSTGRES_USER=postgres
Expand Down
41 changes: 40 additions & 1 deletion .github/workflows/cd-production.yml
Original file line number Diff line number Diff line change
Expand Up @@ -36,8 +36,47 @@ jobs:
REPOSITORY: ${{ vars.AWS_RESOURCE_PREFIX }}-repo
TAG: ${{ github.ref_name }}
run: |
docker build -t $REGISTRY/$REPOSITORY:latest ./backend
docker build \
-t $REGISTRY/$REPOSITORY:latest \
-t $REGISTRY/$REPOSITORY:$TAG \
./backend
docker push $REGISTRY/$REPOSITORY:latest
docker push $REGISTRY/$REPOSITORY:$TAG

- name: Run database migrations
env:
CLUSTER: ${{ vars.AWS_RESOURCE_PREFIX }}-cluster
SERVICE: ${{ vars.AWS_RESOURCE_PREFIX }}-service
TASK_DEF: ${{ vars.AWS_RESOURCE_PREFIX }}-task
run: |
CONTAINER_NAME=$(aws ecs describe-task-definition --task-definition $TASK_DEF --query 'taskDefinition.containerDefinitions[0].name' --output text)

NETWORK_CONFIG=$(aws ecs describe-services --cluster $CLUSTER --services $SERVICE \
--query 'services[0].networkConfiguration' --output json | jq -c '.')

TASK_ARN=$(aws ecs run-task \
--cluster $CLUSTER \
--launch-type FARGATE \
--task-definition $TASK_DEF \
--count 1 \
--network-configuration "$NETWORK_CONFIG" \
--overrides "{\"containerOverrides\":[{\"name\":\"$CONTAINER_NAME\",\"command\":[\"uv\",\"run\",\"alembic\",\"upgrade\",\"head\"]}]}" \
--query 'tasks[0].taskArn' --output text)
Comment thread
coderabbitai[bot] marked this conversation as resolved.

echo "Migration task started: $TASK_ARN"
aws ecs wait tasks-stopped --cluster $CLUSTER --tasks "$TASK_ARN"

EXIT_CODE=$(aws ecs describe-tasks --cluster $CLUSTER --tasks "$TASK_ARN" \
--query "tasks[0].containers[?name=='$CONTAINER_NAME'].exitCode | [0]" --output text)

if [ "$EXIT_CODE" != "0" ]; then
echo "Migration failed with exit code $EXIT_CODE"
aws ecs describe-tasks --cluster $CLUSTER --tasks "$TASK_ARN" \
--query "tasks[0].{stopCode:stopCode,stoppedReason:stoppedReason,containerReason:containers[?name=='$CONTAINER_NAME'].reason | [0]}" \
--output json
exit 1
fi
echo "Migration completed successfully"

Comment thread
coderabbitai[bot] marked this conversation as resolved.
- name: Deploy to ECS
run: |
Expand Down
59 changes: 59 additions & 0 deletions .github/workflows/cd-staging-ec2.yml
Original file line number Diff line number Diff line change
@@ -0,0 +1,59 @@
# Deploys the staging stack to a single EC2 host by sending a shell script
# through SSM Run Command (no SSH keys needed; auth is via OIDC role assumption).
name: Deploy Kaapi staging to EC2

on:
  push:
    branches: [main]
  workflow_dispatch:

# Serialize deploys; queue (don't cancel) a deploy already in flight so a
# half-finished migration is never interrupted.
concurrency:
  group: staging-ec2-deploy
  cancel-in-progress: false

jobs:
  deploy:
    runs-on: ubuntu-latest
    environment: AWS_STAGING_ENV

    # id-token: write is required for OIDC federation with AWS.
    permissions:
      id-token: write
      contents: read

    steps:
      - name: Configure AWS credentials
        uses: aws-actions/configure-aws-credentials@v6
        with:
          role-to-assume: ${{ secrets.STAGING_EC2_DEPLOY_ROLE_ARN }}
          aws-region: ap-south-1

      - name: Trigger deploy on EC2 via SSM
        id: ssm
        env:
          INSTANCE_ID: ${{ secrets.STAGING_EC2_INSTANCE_ID }}
        run: |
          # Fire-and-record: capture the command id so the next step can poll it.
          CMD_ID=$(aws ssm send-command \
            --instance-ids "$INSTANCE_ID" \
            --document-name "AWS-RunShellScript" \
            --comment "Deploy kaapi-backend kaapi-staging" \
            --parameters commands='["set -eux","chown -R ubuntu:ubuntu /data/kaapi-backend","sudo -iu ubuntu bash -lc \"cd /data/kaapi-backend && git fetch --all && git pull origin main && docker compose -f docker-compose.staging.yml build && docker compose -f docker-compose.staging.yml --profile migrate run --rm migrate && docker compose -f docker-compose.staging.yml up -d --remove-orphans && docker image prune -f\""]' \
            --cloud-watch-output-config CloudWatchOutputEnabled=true \
            --query "Command.CommandId" --output text)
          echo "cmd_id=$CMD_ID" >> "$GITHUB_OUTPUT"
          echo "Sent SSM command: $CMD_ID"

      - name: Wait for SSM command to finish
        env:
          INSTANCE_ID: ${{ secrets.STAGING_EC2_INSTANCE_ID }}
          CMD_ID: ${{ steps.ssm.outputs.cmd_id }}
        run: |
          # `aws ssm wait command-executed` gives up after ~100s (20 polls x 5s),
          # which a git pull + image build + migration routinely exceeds, so a
          # still-running deploy would fail this job with a waiter timeout.
          # Poll the invocation status ourselves with an explicit 30-minute
          # deadline instead, and fail only on a non-Success terminal status.
          DEADLINE=$(( $(date +%s) + 1800 ))
          STATUS=Pending
          while [ "$(date +%s)" -lt "$DEADLINE" ]; do
            STATUS=$(aws ssm get-command-invocation \
              --command-id "$CMD_ID" \
              --instance-id "$INSTANCE_ID" \
              --query 'Status' --output text)
            case "$STATUS" in
              Success|Cancelled|TimedOut|Failed) break ;;
            esac
            sleep 15
          done

          # Always surface the remote stdout/stderr for debugging, pass or fail.
          aws ssm get-command-invocation \
            --command-id "$CMD_ID" \
            --instance-id "$INSTANCE_ID" \
            --query '{Status:Status,Stdout:StandardOutputContent,Stderr:StandardErrorContent}' \
            --output json

          if [ "$STATUS" != "Success" ]; then
            echo "SSM command finished with status: $STATUS"
            exit 1
          fi
Comment thread
Ayush8923 marked this conversation as resolved.
File renamed without changes.
# Compose file for the kaapi staging stack, run on the EC2 host by CI.
# The obsolete top-level `version` key was removed: the Compose Specification
# ignores it and Compose v2 emits a warning when it is present.

services:
  # FastAPI backend, exposed on host port 8000 (container listens on 80).
  backend:
    image: "${DOCKER_IMAGE_BACKEND?Variable not set}:${TAG:-latest}"
    container_name: backend
    restart: always
    build:
      context: ./backend
    env_file:
      - .env
    ports:
      - "8000:80"
    healthcheck:
      # assumes curl is installed in the backend image — TODO confirm
      test: ["CMD", "curl", "-f", "http://localhost:80/api/v1/utils/health/"]
      interval: 10s
      timeout: 5s
      retries: 5
    # NOTE(review): --reload is a development feature; with no source volume
    # mounted into the container it only adds file-watcher overhead on a
    # staging host — consider dropping it here.
    command: >
      uv run uvicorn app.main:app --host 0.0.0.0 --port 80 --reload
    # NOTE(review): `develop.watch` only takes effect under
    # `docker compose watch`, which CI does not run — harmless but dev-only.
    develop:
      watch:
        # Sync backend source code into container immediately on change
        - action: sync
          path: ./backend/app
          target: /app/app
        # Rebuild image if dependencies change
        - action: rebuild
          path: ./backend/pyproject.toml
        - action: rebuild
          path: ./backend/uv.lock
    logging:
      driver: awslogs
      options:
        awslogs-region: ap-south-1
        awslogs-group: /ec2/kaapi-staging
        awslogs-stream: backend
        awslogs-create-group: "true"

  # One-shot Alembic migration runner. Only started when the `migrate`
  # profile is requested, e.g.:
  #   docker compose -f docker-compose.staging.yml --profile migrate run --rm migrate
  migrate:
    image: "${DOCKER_IMAGE_BACKEND?Variable not set}:${TAG:-latest}"
    build:
      context: ./backend
    env_file:
      - .env
    profiles: ["migrate"]
    command: ["uv", "run", "alembic", "upgrade", "head"]

  # Celery worker; waits for the backend healthcheck before starting.
  celery_worker:
    image: "${DOCKER_IMAGE_BACKEND?Variable not set}:${TAG:-latest}"
    container_name: celery-worker
    restart: always
    build:
      context: ./backend
    depends_on:
      backend:
        condition: service_healthy
    env_file:
      - .env
    command: ["uv", "run", "celery", "-A", "app.celery.celery_app", "worker", "--loglevel=info"]
    logging:
      driver: awslogs
      options:
        awslogs-region: ap-south-1
        awslogs-group: /ec2/kaapi-staging
        awslogs-stream: celery-worker
        awslogs-create-group: "true"
Loading