
chore: rollout workflow #15

Workflow file for this run

# Call Bytebase API to create the SQL Rollout
name: SQL Rollout
on:
  pull_request:
    branches:
      - main
    paths:
      - "**.up.sql"
jobs:
  bytebase-sql-rollout:
    runs-on: ubuntu-latest
    name: SQL Rollout
    steps:
      - name: Checkout code
        uses: actions/checkout@v4
      - name: Fetch all branches (shallow)
        run: git fetch --depth=1 origin +refs/heads/*:refs/remotes/origin/*
      - name: Setup environment variables
        id: setup_env
        run: |
          echo "bytebase_api=https://bytebase-ci.zeabur.app/v1" >> $GITHUB_OUTPUT
          echo "[email protected]" >> $GITHUB_OUTPUT
          echo "database_id=instances/prod-instance/databases/example" >> $GITHUB_OUTPUT
          echo "project_id=example" >> $GITHUB_OUTPUT
      - name: Exchange token
        id: exchange_token
        run: |
          bytebase_token=$(curl -v ${{ steps.setup_env.outputs.bytebase_api }}/auth/login \
            --data-raw '{"email":"${{ steps.setup_env.outputs.bytebase_account }}","password":"${{ secrets.BYTEBASE_PASSWORD }}","web":true}' \
            --compressed 2>&1 | grep token | grep -o 'access-token=[^;]*;' | grep -o '[^;]*' | sed 's/access-token=//g; s/;//g')
          echo "bytebase_token=${bytebase_token}" >> $GITHUB_OUTPUT
      - name: Create sheet per SQL script
        id: create_sheet
        run: |
          echo "Finding changed SQL files..."
          # Get the list of changed files in this PR
          PR_NUM=${{ github.event.number }}
          FILES=$(gh pr view $PR_NUM --json files --jq '.files[].path' | grep -E '\.up\.sql$')
          HAS_ERROR_OR_WARNING=false
          sheet_ids=()
          # Iterate over each changed SQL file and create a sheet for it
          for FILE in $FILES; do
            echo "$FILE"
            # Single-line base64 so the JSON payload carries no embedded newlines
            content=$(base64 -w 0 "$FILE")
            request_body=$(jq -n \
              --arg title "GitHub $PR_NUM" \
              --arg content "$content" \
              '$ARGS.named')
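            # The resulting body has the shape {"title": "GitHub <PR number>", "content": "<base64 of the SQL file>"}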
echo "::debug::Request body: ${request_body}"
response=$(curl -s -w "HTTPSTATUS:%{http_code}" -X POST -o sheet.json ${{ steps.setup_env.outputs.bytebase_api }}/projects/${{ steps.setup_env.outputs.project_id }}/sheets \
-H 'Content-Type: application/json' \
-H "Authorization: Bearer ${{ steps.exchange_token.outputs.bytebase_token }}" \
-d "${request_body}")
http_status=$(echo "$response" | grep -oE 'HTTPSTATUS:[0-9]+' | grep -oE '[0-9]+')
if [ ${http_status} != 200 ]; then
echo "::error ::Failed to create SQL sheet ${FILE} with response code ${http_status}"
exit 1
fi
# Emit annotations for each advice
echo "::debug::Sheet: $(cat sheet.json)"
sheet_id=$(jq -r '.name' sheet.json)
echo "Created sheet $sheet_id"
sheet_ids+=("$sheet_id")
done
# Convert the array to a newline-separated string and store it in GITHUB_OUTPUT
echo "::set-output name=sheet_ids::$(IFS=,; echo "${sheet_ids[*]}")"
        env:
          GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
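      # Wrap the collected sheets into plan specs; each spec is a MIGRATE change
      # against the target database, e.g.
      #   {"id": "<uuid>", "change_database_config": {"target": "<database_id>", "sheet": "<sheet name>", "type": "MIGRATE"}}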
      - name: Create rollout plan
        id: create_plan
        run: |
          echo "Sheet IDs from previous step: ${{ steps.create_sheet.outputs.sheet_ids }}"
          # Convert the comma-separated string back into an array
          IFS=$',' read -r -a sheet_ids <<< "${{ steps.create_sheet.outputs.sheet_ids }}"
          echo "Parsed sheet IDs: ${sheet_ids[*]}"
          # Initialize an empty array for the specs
          specs=()
          # Populate the specs array with one change_database_config spec per sheet
          for sheet_id in "${sheet_ids[@]}"; do
            echo "Sheet ID: $sheet_id"
            # Give each spec its own UUID so specs stay distinguishable within the plan
            spec=$(jq -n \
              --arg id "$(uuidgen)" \
              --arg sheet "$sheet_id" \
              '{id: $id, change_database_config: {target: "${{ steps.setup_env.outputs.database_id }}", sheet: $sheet, type: "MIGRATE"}}')
            specs+=("$spec")
          done
          echo "Specs array content:"
          printf '%s\n' "${specs[@]}"
          # Combine the specs into a single JSON array
          json_specs=$(printf '%s\n' "${specs[@]}" | jq -s '.')
          # Construct the final JSON structure with the specs array
          final_json=$(jq -n --argjson specs "$json_specs" '{"steps": [{"specs": $specs}]}')
          echo "Final JSON:"
          echo "$final_json"