Behaviour
I recently had a multiarch build that took 1 h 27 min just to report a build failure on one architecture.
It seems GH runners have 2 CPUs, which makes even a simple multiarch build such as that one (using 5 platforms) awfully slow.
My guess (which might be wrong) is that it should be possible to use 1 runner per arch and merge the resulting images in a single multiarch one.
Steps to reproduce this issue
- Create a multiarch build.
Expected behaviour
It shouldn't be extremely slow.
Actual behaviour
It is extremely slow.
Configuration
name: ci
on:
pull_request:
push:
branches:
- main
tags:
- "*"
workflow_dispatch:
inputs:
pytest_addopts:
description:
Extra options for pytest; use -vv for full details; see
https://docs.pytest.org/en/latest/example/simple.html#how-to-change-command-line-options-defaults
required: false
env:
IMAGE_NAME: tecnativa/ci-base
LANG: "en_US.utf-8"
LC_ALL: "en_US.utf-8"
PIP_CACHE_DIR: ${{ github.workspace }}/.cache.~/pip
PIPX_HOME: ${{ github.workspace }}/.cache.~/pipx
POETRY_CACHE_DIR: ${{ github.workspace }}/.cache.~/pypoetry
POETRY_VIRTUALENVS_IN_PROJECT: "true"
PYTEST_ADDOPTS: ${{ github.event.inputs.pytest_addopts }}
PYTHONIOENCODING: "UTF-8"
jobs:
build-push-test-deploy:
strategy:
matrix:
python: [3.9]
runs-on: ubuntu-latest
services:
registry:
image: registry:2
ports:
- 5000:5000
steps:
- uses: actions/checkout@v2
# Build and push
- uses: actions/cache@v2
with:
path: |
/tmp/.buildx-cache
key:
buildx|${{ secrets.CACHE_DATE }}|${{ runner.os }}|${{
hashFiles('Dockerfile') }}
- uses: docker/setup-qemu-action@v1
- name: Set up QEMU
uses: docker/setup-qemu-action@v1
- name: Set up Docker Buildx
id: buildx
uses: docker/setup-buildx-action@v1
with:
driver-opts: network=host
install: true
- name: Docker meta for local images
id: docker_meta_local
uses: crazy-max/ghaction-docker-meta@v1
with:
images: localhost:5000/${{ env.IMAGE_NAME }}
tag-edge: true
tag-semver: |
{{version}}
{{major}}.{{minor}}
- name: Build and push to local registry
uses: docker/build-push-action@v2
with:
cache-from: type=local,src=/tmp/.buildx-cache
cache-to: type=local,dest=/tmp/.buildx-cache,mode=max
context: .
file: ./Dockerfile
labels: ${{ steps.docker_meta_local.outputs.labels }}
push: true
tags: ${{ steps.docker_meta_local.outputs.tags }}
# Limit to platforms supported by base image
platforms: |
linux/amd64
linux/arm/v7
linux/arm64/v8
linux/ppc64le
linux/s390x
# Test (only in local arch)
- name: Install python
uses: actions/setup-python@v2
with:
python-version: ${{ matrix.python }}
- uses: actions/cache@v2
with:
path: |
.cache.~
.venv
~/.local/bin
key:
venv|${{ secrets.CACHE_DATE }}|${{ runner.os }}|${{ matrix.python }}|${{
hashFiles('pyproject.toml', 'poetry.lock') }}
- run: pip install poetry
- name: Patch $PATH
run: echo "$HOME/.local/bin" >> $GITHUB_PATH
- run: poetry install
- run: poetry run pytest --image localhost:5000/${{ env.IMAGE_NAME }}
# Deploy (only outside of pull requests)
- name: Docker meta for public images
id: docker_meta_public
if: github.event_name != 'pull_request'
uses: crazy-max/ghaction-docker-meta@v1
with:
images: ghcr.io/${{ env.IMAGE_NAME }}
tag-edge: true
tag-semver: |
{{version}}
{{major}}.{{minor}}
- name: Login to GitHub Container Registry
uses: docker/login-action@v1
if: github.event_name != 'pull_request'
with:
registry: ghcr.io
username: ${{ secrets.BOT_LOGIN }}
password: ${{ secrets.BOT_TOKEN }}
- name: Push to public registry
uses: docker/build-push-action@v2
if: github.event_name != 'pull_request'
with:
cache-from: type=local,src=/tmp/.buildx-cache
cache-to: type=local,dest=/tmp/.buildx-cache,mode=max
context: .
file: ./Dockerfile
labels: ${{ steps.docker_meta_public.outputs.labels }}
push: true
tags: ${{ steps.docker_meta_public.outputs.tags }}
# Limit to platforms supported by base image
platforms: |
linux/amd64
linux/arm/v7
linux/arm64/v8
linux/ppc64le
linux/s390x
Logs
logs_41.zip
@joao-p-marques @Tecnativa TT25794
Behaviour
I recently had a multiarch build that took 1 h 27 min just to report a build failure on one architecture.
It seems GH runners have 2 CPUs, which makes even a simple multiarch build such as that one (using 5 platforms) awfully slow.
My guess (which might be wrong) is that it should be possible to use 1 runner per arch and merge the resulting images in a single multiarch one.
Steps to reproduce this issue
Expected behaviour
It shouldn't be extremely slow.
Actual behaviour
It is extremely slow.
Configuration
Logs
logs_41.zip
@joao-p-marques @Tecnativa TT25794