diff --git a/buildrunner/docker/multiplatform_image_builder.py b/buildrunner/docker/multiplatform_image_builder.py
index d10b42d..e3bd584 100644
--- a/buildrunner/docker/multiplatform_image_builder.py
+++ b/buildrunner/docker/multiplatform_image_builder.py
@@ -11,9 +11,10 @@
 import re
 import shutil
 import tempfile
+from threading import Thread
 import uuid
-from multiprocessing import Process, SimpleQueue
-from typing import Dict, List, Optional, Union
+from queue import SimpleQueue
+from typing import Dict, Iterator, List, Optional, Union
 
 import python_on_whales
 import timeout_decorator
@@ -186,6 +187,10 @@ def _get_build_cache_options(self, builder: Optional[str]) -> dict:
         actual_builder = builder or "default"
         return cache_options if actual_builder in self._cache_builders else {}
 
+    def _log_buildx(self, logs_itr: Iterator[str], platform: str):
+        for log in logs_itr:
+            LOGGER.info(f"[{platform}] {log}".strip())
+
     # pylint: disable=too-many-arguments,too-many-locals
     def _build_with_inject(
         self,
@@ -233,7 +238,7 @@ def _build_with_inject(
             context_dir
         ), f"Failed to create context dir {context_dir}"
 
-        docker.buildx.build(
+        logs_itr = docker.buildx.build(
            context_dir,
             tags=[image_ref],
             platforms=[platform],
@@ -244,8 +249,10 @@ def _build_with_inject(
             build_args=build_args,
             cache=cache,
             pull=pull,
+            stream_logs=True,
             **self._get_build_cache_options(builder),
         )
+        self._log_buildx(logs_itr, platform)
 
     @staticmethod
     def _get_image_digest(image_ref: str) -> str:
@@ -313,7 +320,7 @@ def _build_single_image(
                 pull=pull,
             )
         else:
-            docker.buildx.build(
+            logs_itr = docker.buildx.build(
                 path,
                 tags=[image_ref],
                 platforms=[platform],
@@ -324,8 +331,11 @@ def _build_single_image(
                 builder=builder,
                 cache=cache,
                 pull=pull,
+                stream_logs=True,
                 **self._get_build_cache_options(builder),
             )
+            self._log_buildx(logs_itr, platform)
+
         # Push after the initial load to support remote builders that cannot access the local registry
         docker.push([image_ref])
 
@@ -373,7 +383,7 @@ def build_multiple_images(
         path: str = ".",
         file: str = "Dockerfile",
         target: Optional[str] = None,
-        do_multiprocessing: bool = True,
+        use_threading: bool = True,
         build_args: dict = None,
         inject: dict = None,
         cache: bool = False,
@@ -385,7 +395,7 @@ def build_multiple_images(
         :arg platforms: The platforms to build the image for (e.g. linux/amd64)
         :arg path: The path to the Dockerfile. Defaults to ".".
         :arg file: The path/name of the Dockerfile (i.e. /Dockerfile). Defaults to "Dockerfile".
-        :arg do_multiprocessing: Whether to use multiprocessing to build the images. Defaults to True.
+        :arg use_threading: Whether to use threading to build the images. Defaults to True.
         :arg build_args: The build args to pass to docker. Defaults to None.
         :arg inject: The files to inject into the build context. Defaults to None.
         :arg cache: If true, enables cache, defaults to False
@@ -444,12 +454,12 @@ def build_multiple_images(
         )
         LOGGER.info(OUTPUT_LINE)
 
-        processes = []
+        threads = []
         LOGGER.info(
-            f'Starting builds for {len(platforms)} platforms in {"parallel" if do_multiprocessing else "sequence"}'
+            f'Starting builds for {len(platforms)} platforms in {"parallel" if use_threading else "sequence"}'
         )
 
-        # Since multiprocessing may be used, use a simple queue to communicate with the build method
+        # Since threading may be used, use a simple queue to communicate with the build method
         # This queue receives tuples of (image_ref, image_digest)
         image_info_by_image_ref = {}
         queue = SimpleQueue()
@@ -475,9 +485,9 @@ def build_multiple_images(
                 pull,
             )
             LOGGER.debug(f"Building {repo} for {platform}")
-            if do_multiprocessing:
-                processes.append(
-                    Process(
+            if use_threading:
+                threads.append(
+                    Thread(
                         target=self._build_single_image,
                         args=build_single_image_args,
                     )
@@ -485,11 +495,11 @@ def build_multiple_images(
             else:
                 self._build_single_image(*build_single_image_args)
 
-        # Start and join processes in parallel if multiprocessing is enabled
-        for proc in processes:
-            proc.start()
-        for proc in processes:
-            proc.join()
+        # Start and join threads in parallel if enabled
+        for thread in threads:
+            thread.start()
+        for thread in threads:
+            thread.join()
 
         while not queue.empty():
             image_ref, image_digest = queue.get()
@@ -563,7 +573,7 @@ def push(self) -> None:
             # Push each tag individually in order to prevent strange errors with multiple matching tags
             for image_ref in tagged_image.image_refs:
                 self._push_with_timeout(source_image_refs, [image_ref])
-            # Process finished within timeout
+            # Finished within timeout
             LOGGER.info(
                 f"Successfully pushed multiplatform image(s) {tagged_image}"
             )
diff --git a/setup.py b/setup.py
index ad7ff46..3bfc9cd 100644
--- a/setup.py
+++ b/setup.py
@@ -13,7 +13,7 @@
 
 from setuptools import setup, find_packages
 
-BASE_VERSION = "3.9"
+BASE_VERSION = "3.10"
 
 SOURCE_DIR = os.path.dirname(os.path.abspath(__file__))
 BUILDRUNNER_DIR = os.path.join(SOURCE_DIR, "buildrunner")
diff --git a/tests/test_multiplatform.py b/tests/test_multiplatform.py
index 606531b..3a45067 100644
--- a/tests/test_multiplatform.py
+++ b/tests/test_multiplatform.py
@@ -95,7 +95,7 @@ def test_start_local_registry_on_build():
             platforms=["linux/arm64", "linux/amd64"],
             path=test_path,
             file=f"{test_path}/Dockerfile",
-            do_multiprocessing=False,
+            use_threading=False,
         )
 
         # Check that the registry is running and only one is found with that name
@@ -122,7 +122,7 @@ def test_start_local_registry_on_build():
             platforms=["linux/arm64", "linux/amd64"],
             path=test_path,
             file=f"{test_path}/Dockerfile",
-            do_multiprocessing=False,
+            use_threading=False,
         )
 
         registry_name = mpib._mp_registry_info.name
@@ -145,7 +145,7 @@ def test_tag_native_platform(platforms, expected_image_tags):
             platforms,
             path=test_path,
             file=f"{test_path}/Dockerfile",
-            do_multiprocessing=False,
+            use_threading=False,
         )
 
         assert (
@@ -183,7 +183,7 @@ def test_tag_native_platform_multiple_tags(name, platforms, expected_image_tags)
             platforms=platforms,
             path=test_path,
             file=f"{test_path}/Dockerfile",
-            do_multiprocessing=False,
+            use_threading=False,
         )
 
         assert (
@@ -222,7 +222,7 @@ def test_tag_native_platform_keep_images(name, platforms, expected_image_tags):
             platforms=platforms,
             path=test_path,
             file=f"{test_path}/Dockerfile",
-            do_multiprocessing=False,
+            use_threading=False,
         )
 
         assert (
@@ -273,7 +273,7 @@ def test_push():
             platforms=platforms,
             path=test_path,
             file=f"{test_path}/Dockerfile",
-            do_multiprocessing=False,
+            use_threading=False,
        )
 
         assert built_image is not None
@@ -317,7 +317,7 @@ def test_push_with_dest_names():
             platforms=platforms,
             path=test_path,
             file=f"{test_path}/Dockerfile",
-            do_multiprocessing=False,
+            use_threading=False,
         )
 
         assert built_image is not None
@@ -386,7 +386,7 @@ def test_build(
             platforms=platforms,
             path=test_path,
             file=f"{test_path}/Dockerfile",
-            do_multiprocessing=False,
+            use_threading=False,
         )
 
         assert len(built_image.built_images) == len(platforms)
@@ -429,7 +429,7 @@ def test_build_multiple_builds(
             platforms=platforms1,
             path=test_path,
             file=f"{test_path}/Dockerfile",
-            do_multiprocessing=False,
+            use_threading=False,
         )
 
         # Build set 2
@@ -437,7 +437,7 @@ def test_build_multiple_builds(
             platforms=platforms2,
             path=test_path,
             file=f"{test_path}/Dockerfile",
-            do_multiprocessing=False,
+            use_threading=False,
         )
 
         # Check set 1
@@ -476,6 +476,7 @@ def test_build_multiple_builds(
             cache_to=None,
             pull=False,
             target=None,
+            stream_logs=True,
         ),
         call(
             test_path,
@@ -490,6 +491,7 @@ def test_build_multiple_builds(
             cache_to=None,
             pull=False,
             target=None,
+            stream_logs=True,
         ),
         call(
             test_path,
@@ -504,6 +506,7 @@ def test_build_multiple_builds(
             cache_to=None,
             pull=False,
             target=None,
+            stream_logs=True,
         ),
         call(
             test_path,
@@ -518,6 +521,7 @@ def test_build_multiple_builds(
             cache_to=None,
             pull=False,
             target=None,
+            stream_logs=True,
         ),
     ]
     assert mock_push.call_count == 4
@@ -568,7 +572,7 @@ def test_use_build_registry():
             platforms=["linux/arm64", "linux/amd64"],
             path=test_path,
             file=f"{test_path}/Dockerfile",
-            do_multiprocessing=False,
+            use_threading=False,
         )
         assert all(
             image_info.repo.startswith(f"{build_registry}/")