diff --git a/.dockerignore b/.dockerignore index 1f5579978..5f13236dd 100644 --- a/.dockerignore +++ b/.dockerignore @@ -15,6 +15,8 @@ !docs/ # We use the spec file !contrib/ +# This is used to add content on top of our default base +!hack/ # The systemd units and baseimage bits end up in installs !systemd/ !baseimage/ diff --git a/.github/workflows/build-and-publish.yml b/.github/workflows/build-and-publish.yml index 817778f9b..f4caa7a13 100644 --- a/.github/workflows/build-and-publish.yml +++ b/.github/workflows/build-and-publish.yml @@ -42,7 +42,9 @@ jobs: fi - name: Build container - run: just build-integration-test-image + # TODO: Also consider building + publishing an image that is just "base + bootc" + # as this change implicitly also publishes our integration test images. + run: just build - name: Login to ghcr.io uses: redhat-actions/podman-login@v1 diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index 103e8fbb6..e3ab65449 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -85,9 +85,9 @@ jobs: --tmpfs /var/lib/containers \ -v /run/dbus:/run/dbus -v /run/systemd:/run/systemd localhost/bootc /src/crates/ostree-ext/ci/priv-integration.sh # Nondestructive but privileged tests - sudo bootc-integration-tests host-privileged localhost/bootc-integration-install + sudo bootc-integration-tests host-privileged localhost/bootc-install # Install tests - sudo bootc-integration-tests install-alongside localhost/bootc-integration-install + sudo bootc-integration-tests install-alongside localhost/bootc-install # system-reinstall-bootc tests cargo build --release -p system-reinstall-bootc @@ -97,7 +97,7 @@ jobs: sudo install -m 0755 target/release/system-reinstall-bootc /usr/bin/system-reinstall-bootc # These tests may mutate the system live so we can't run in parallel - sudo bootc-integration-tests system-reinstall localhost/bootc-integration --test-threads=1 + sudo bootc-integration-tests system-reinstall localhost/bootc --test-threads=1 # And the fsverity case sudo podman run --privileged --pid=host localhost/bootc-fsverity bootc install to-existing-root --stateroot=other \ @@ -189,9 +189,9 @@ jobs: - name: Build container run: | - just build-integration-test-image-from-package target/packages + BOOTC_SKIP_PACKAGE=1 just build # Extra cross-check (duplicating the integration test) that we're using the right base - used_vid=$(podman run --rm localhost/bootc-integration bash -c '. /usr/lib/os-release && echo ${ID}-${VERSION_ID}') + used_vid=$(podman run --rm localhost/bootc bash -c '. /usr/lib/os-release && echo ${ID}-${VERSION_ID}') test ${{ matrix.test_os }} = "${used_vid}" - name: Unit and container integration tests @@ -206,28 +206,59 @@ jobs: fi just clean-local-images - - name: Run TMT test about bootc install on coreos + - name: Archive TMT logs + if: always() + uses: actions/upload-artifact@v6 + with: + name: tmt-log-PR-${{ github.event.number }}-${{ matrix.test_os }}-${{ matrix.variant }}-${{ env.ARCH }} + path: /var/tmp/tmt + + # Test bootc install on Fedora CoreOS (separate job to avoid disk space issues + # when run in the same job as test-integration). + # Uses fedora-43 as it's the current stable Fedora release matching CoreOS.
+ test-coreos: + needs: package + runs-on: ubuntu-24.04 + + steps: + - uses: actions/checkout@v6 + - name: Bootc Ubuntu Setup + uses: ./.github/actions/bootc-ubuntu-setup + with: + libvirt: true + - name: Install tmt + run: pip install --user "tmt[provision-virtual]" + + - name: Setup env run: | - # Only test fedora-43 on fedora-coreos:testing-devel - if [ "${{ matrix.test_os }}" = "fedora-43" ] && [ "${{ matrix.variant }}" = "ostree" ]; then - just build-testimage-coreos target/packages - just test-tmt-on-coreos plan-bootc-install-on-coreos - just clean-local-images - else - echo "skipped" - fi + BASE=$(just pullspec-for-os base fedora-43) + echo "BOOTC_base=${BASE}" >> $GITHUB_ENV + echo "BOOTC_variant=ostree" >> $GITHUB_ENV + + - name: Download package artifacts + uses: actions/download-artifact@v7 + with: + name: packages-fedora-43 + path: target/packages/ + + - name: Build container and test on CoreOS + run: | + BOOTC_SKIP_PACKAGE=1 just build + just build-testimage-coreos target/packages + just test-tmt-on-coreos plan-bootc-install-on-coreos + just clean-local-images - name: Archive TMT logs if: always() uses: actions/upload-artifact@v6 with: - name: tmt-log-PR-${{ github.event.number }}-${{ matrix.test_os }}-${{ matrix.variant }}-${{ env.ARCH }} + name: tmt-log-PR-${{ github.event.number }}-fedora-43-coreos-${{ env.ARCH }} path: /var/tmp/tmt # Sentinel job for required checks - configure this job name in repository settings required-checks: if: always() - needs: [cargo-deny, validate, package, test-integration] + needs: [cargo-deny, validate, package, test-integration, test-coreos] runs-on: ubuntu-latest steps: - run: exit 1 @@ -235,4 +266,5 @@ jobs: needs.cargo-deny.result != 'success' || needs.validate.result != 'success' || needs.package.result != 'success' || - needs.test-integration.result != 'success' + needs.test-integration.result != 'success' || + needs.test-coreos.result != 'success' diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md index 151cd0efb..b395bd58b 100644 --- a/CONTRIBUTING.md +++ b/CONTRIBUTING.md @@ -129,7 +129,7 @@ There's a small set of tests which are designed to run inside a bootc container and are built into the default container image: ``` -$ podman run --rm -ti localhost/bootc bootc-integration-tests container +$ just test-container ``` ## Submitting a patch diff --git a/Dockerfile b/Dockerfile index 499c7199b..ca821407f 100644 --- a/Dockerfile +++ b/Dockerfile @@ -15,14 +15,10 @@ COPY . /src FROM scratch as packaging COPY contrib/packaging / -FROM $base as base -# Mark this as a test image (moved from --label build flag to fix layer caching) -LABEL bootc.testimage="1" - # This image installs build deps, pulls in our source code, and installs updated # bootc binaries in /out. The intention is that the target rootfs is extracted from /out # back into a final stage (without the build deps etc) below. -FROM base as buildroot +FROM $base as buildroot # Flip this off to disable initramfs code ARG initramfs=1 # This installs our buildroot, and we want to cache it independently of the rest. @@ -40,8 +36,41 @@ FROM buildroot as sdboot-content # Writes to /out RUN /src/contrib/packaging/configure-systemdboot download +# We always do a "from scratch" build +# https://docs.fedoraproject.org/en-US/bootc/building-from-scratch/ +# because this fixes https://github.com/containers/composefs-rs/issues/132 +# NOTE: Until we have https://gitlab.com/fedora/bootc/base-images/-/merge_requests/317 +# this stage will end up capturing whatever RPMs we find at this time. 
+# NOTE: This is using the *stock* bootc binary, not the one we want to build from +# local sources. We'll override it later. +# NOTE: All your base belong to me. +FROM $base as target-base +RUN /usr/libexec/bootc-base-imagectl build-rootfs --manifest=standard /target-rootfs + +FROM scratch as base +COPY --from=target-base /target-rootfs/ / +COPY --from=src /src/hack/ /run/hack/ +# SKIP_CONFIGS=1 skips LBIs, test kargs, and install configs (for FCOS testing) +ARG SKIP_CONFIGS +RUN cd /run/hack/ && SKIP_CONFIGS="${SKIP_CONFIGS}" ./provision-derived.sh +# Note we don't do any customization here yet +# Mark this as a test image +LABEL bootc.testimage="1" +# Otherwise standard metadata +LABEL containers.bootc 1 +LABEL ostree.bootable 1 +# https://pagure.io/fedora-kiwi-descriptions/pull-request/52 +ENV container=oci +# Optional labels that only apply when running this image as a container. These keep the default entry point running under systemd. +STOPSIGNAL SIGRTMIN+3 +CMD ["/sbin/init"] + +# ------------- +# external dependency cutoff point: # NOTE: Every RUN instruction past this point should use `--network=none`; we want to ensure # all external dependencies are clearly delineated. +# This is verified in `cargo xtask check-buildsys`. +# ------------- FROM buildroot as build # Version for RPM build (optional, computed from git in Justfile) @@ -50,7 +79,7 @@ ARG pkgversion ARG SOURCE_DATE_EPOCH ENV SOURCE_DATE_EPOCH=${SOURCE_DATE_EPOCH} # Build RPM directly from source, using cached target directory -RUN --mount=type=cache,target=/src/target --mount=type=cache,target=/var/roothome --network=none RPM_VERSION="${pkgversion}" /src/contrib/packaging/build-rpm +RUN --network=none --mount=type=cache,target=/src/target --mount=type=cache,target=/var/roothome RPM_VERSION="${pkgversion}" /src/contrib/packaging/build-rpm FROM buildroot as sdboot-signed # The secureboot key and cert are passed via Justfile @@ -66,11 +95,11 @@ FROM build as units # A place that we're more likely to be able to set xattrs VOLUME /var/tmp ENV TMPDIR=/var/tmp -RUN --mount=type=cache,target=/src/target --mount=type=cache,target=/var/roothome --network=none make install-unit-tests +RUN --network=none --mount=type=cache,target=/src/target --mount=type=cache,target=/var/roothome make install-unit-tests # This just does syntax checking FROM buildroot as validate -RUN --mount=type=cache,target=/src/target --mount=type=cache,target=/var/roothome --network=none make validate +RUN --network=none --mount=type=cache,target=/src/target --mount=type=cache,target=/var/roothome make validate # Common base for final images: configures variant, rootfs, and injects extra content FROM base as final-common @@ -80,22 +109,12 @@ RUN --network=none --mount=type=bind,from=packaging,target=/run/packaging \ --mount=type=bind,from=sdboot-signed,target=/run/sdboot-signed \ /run/packaging/configure-variant "${variant}" ARG rootfs="" -RUN --mount=type=bind,from=packaging,target=/run/packaging /run/packaging/configure-rootfs "${variant}" "${rootfs}" +RUN --network=none --mount=type=bind,from=packaging,target=/run/packaging /run/packaging/configure-rootfs "${variant}" "${rootfs}" COPY --from=packaging /usr-extras/ /usr/ -# Default target for source builds (just build) -# Installs packages from the internal build stage +# Final target: installs pre-built packages from /run/packages volume mount. 
+# Use with: podman build --target=final -v path/to/packages:/run/packages:ro FROM final-common as final -RUN --mount=type=bind,from=packaging,target=/run/packaging \ - --mount=type=bind,from=build,target=/build-output \ - --network=none \ - /run/packaging/install-rpm-and-setup /build-output/out -RUN bootc container lint --fatal-warnings - -# Alternative target for pre-built packages (CI workflow) -# Use with: podman build --target=final-from-packages -v path/to/packages:/run/packages:ro -FROM final-common as final-from-packages -RUN --mount=type=bind,from=packaging,target=/run/packaging \ - --network=none \ +RUN --network=none --mount=type=bind,from=packaging,target=/run/packaging \ /run/packaging/install-rpm-and-setup /run/packages -RUN bootc container lint --fatal-warnings +RUN --network=none bootc container lint --fatal-warnings diff --git a/Justfile b/Justfile index a8172c1d9..4b0ffad54 100644 --- a/Justfile +++ b/Justfile @@ -17,10 +17,8 @@ # This image is just the base image plus our updated bootc binary base_img := "localhost/bootc" -# Derives from the above and adds nushell, cloudinit etc. -integration_img := base_img + "-integration" # Has a synthetic upgrade -integration_upgrade_img := integration_img + "-upgrade" +upgrade_img := base_img + "-upgrade" # ostree: The default # composefs-sealeduki-sdboot: A system with a sealed composefs using systemd-boot @@ -43,10 +41,34 @@ lbi_images := "quay.io/curl/curl:latest quay.io/curl/curl-base:latest registry.a generic_buildargs := "" # Args for package building (no secrets needed, just builds RPMs) base_buildargs := generic_buildargs + " --build-arg=base=" + base + " --build-arg=variant=" + variant -buildargs := base_buildargs + " --secret=id=secureboot_key,src=target/test-secureboot/db.key --secret=id=secureboot_cert,src=target/test-secureboot/db.crt" +# - scratch builds need extra perms per https://docs.fedoraproject.org/en-US/bootc/building-from-scratch/ +# - we do secure boot signing here, so provide the keys +buildargs := base_buildargs \ + + " --cap-add=all --security-opt=label=type:container_runtime_t --device /dev/fuse" \ + + " --secret=id=secureboot_key,src=target/test-secureboot/db.key --secret=id=secureboot_cert,src=target/test-secureboot/db.crt" # Args for build-sealed (no base arg, it sets that itself) sealed_buildargs := "--build-arg=variant=" + variant + " --secret=id=secureboot_key,src=target/test-secureboot/db.key --secret=id=secureboot_cert,src=target/test-secureboot/db.crt" +# The default target: build the container image from current sources. +# Note that commonly you may want to override the base image that goes +# into the container image, e.g. +# `just build --build-arg=base=quay.io/fedora/fedora-bootc:42` +# +# Note you can set `BOOTC_SKIP_PACKAGE=1` in the environment to skip the `package` prerequisite and reuse the packages already in target/packages. +build: package _keygen && _pull-lbi-images + #!/bin/bash + set -xeuo pipefail + test -d target/packages + # Resolve to absolute path for podman volume mount + # Use :z for SELinux relabeling + pkg_path=$(realpath target/packages) + podman build --target=final -v "${pkg_path}":/run/packages:ro,z -t {{base_img}}-bin {{buildargs}} . + ./hack/build-sealed {{variant}} {{base_img}}-bin {{base_img}} {{sealed_buildargs}} + +# Pull images used by hack/lbi +_pull-lbi-images: + podman pull -q --retry 5 --retry-delay 5s {{lbi_images}} + # Compute SOURCE_DATE_EPOCH and VERSION from git for reproducible builds. # Outputs shell variable assignments that can be eval'd.
_git-build-vars: @@ -68,23 +90,6 @@ _git-build-vars: # Needed by bootc install on ostree fedora-coreos := "quay.io/fedora/fedora-coreos:testing-devel" - -# The default target: build the container image from current sources. -# Note commonly you might want to override the base image via e.g. -# `just build --build-arg=base=quay.io/fedora/fedora-bootc:42` -# -# The Dockerfile builds RPMs internally in its 'build' stage, so we don't need -# to call 'package' first. This avoids cache invalidation from external files. -build: _keygen - #!/bin/bash - set -xeuo pipefail - eval $(just _git-build-vars) - podman build {{base_buildargs}} --target=final \ - --build-arg=SOURCE_DATE_EPOCH=${SOURCE_DATE_EPOCH} \ - --build-arg=pkgversion=${VERSION} \ - -t {{base_img}}-bin {{buildargs}} . - ./hack/build-sealed {{variant}} {{base_img}}-bin {{base_img}} {{sealed_buildargs}} - # Generate Secure Boot keys (only for our own CI/testing) _keygen: ./hack/generate-secureboot-keys @@ -93,74 +98,37 @@ _keygen: build-sealed: @just --justfile {{justfile()}} variant=composefs-sealeduki-sdboot build -# Build packages (e.g. RPM) using a container buildroot -_packagecontainer: - #!/bin/bash - set -xeuo pipefail - eval $(just _git-build-vars) - echo "Building RPM with version: ${VERSION}" - podman build {{base_buildargs}} --build-arg=SOURCE_DATE_EPOCH=${SOURCE_DATE_EPOCH} --build-arg=pkgversion=${VERSION} -t localhost/bootc-pkg --target=build . - # Build packages (e.g. RPM) into target/packages/ # Any old packages will be removed. -package: _packagecontainer - mkdir -p target/packages - rm -vf target/packages/*.rpm - podman run --rm localhost/bootc-pkg tar -C /out/ -cf - . | tar -C target/packages/ -xvf - - chmod a+rx target target/packages - chmod a+r target/packages/*.rpm - podman rmi localhost/bootc-pkg - -# Copy pre-existing packages from PATH into target/packages/ -# Note: This is mainly for CI artifact extraction; build-from-package -# now uses volume mounts directly instead of copying to target/packages/. -copy-packages-from PATH: +# Set BOOTC_SKIP_PACKAGE=1 in the environment to bypass this stage. We don't +# yet have an accurate way to avoid rebuilding this in CI. +package: #!/bin/bash set -xeuo pipefail - if ! compgen -G "{{PATH}}/*.rpm" > /dev/null; then - echo "Error: No packages found in {{PATH}}" >&2 - exit 1 + packages=target/packages + if test -n "${BOOTC_SKIP_PACKAGE:-}"; then + if test '!' -d "${packages}"; then + echo "BOOTC_SKIP_PACKAGE is set, but missing ${packages}" 1>&2; exit 1 + fi + exit 0 fi - mkdir -p target/packages - rm -vf target/packages/*.rpm - cp -v {{PATH}}/*.rpm target/packages/ - chmod a+rx target target/packages - chmod a+r target/packages/*.rpm - -# Build the container image using pre-existing packages from PATH -# Uses the 'final-from-packages' target with a volume mount to inject packages, -# avoiding Docker context cache invalidation issues. -build-from-package PATH: _keygen - #!/bin/bash - set -xeuo pipefail - # Resolve to absolute path for podman volume mount - # Use :z for SELinux relabeling - pkg_path=$(realpath "{{PATH}}") - podman build {{base_buildargs}} --target=final-from-packages -v "${pkg_path}":/run/packages:ro,z -t {{base_img}}-bin {{buildargs}} .
- ./hack/build-sealed {{variant}} {{base_img}}-bin {{base_img}} {{sealed_buildargs}} - -# Pull images used by hack/lbi -_pull-lbi-images: - podman pull -q --retry 5 --retry-delay 5s {{lbi_images}} - -# This container image has additional testing content and utilities -build-integration-test-image: build _pull-lbi-images - cd hack && podman build {{base_buildargs}} -t {{integration_img}}-bin -f Containerfile . - ./hack/build-sealed {{variant}} {{integration_img}}-bin {{integration_img}} {{sealed_buildargs}} - -# Build integration test image using pre-existing packages from PATH -build-integration-test-image-from-package PATH: _pull-lbi-images - @just build-from-package {{PATH}} - cd hack && podman build {{base_buildargs}} -t {{integration_img}}-bin -f Containerfile . - ./hack/build-sealed {{variant}} {{integration_img}}-bin {{integration_img}} {{sealed_buildargs}} + eval $(just _git-build-vars) + echo "Building RPM with version: ${VERSION}" + podman build {{base_buildargs}} --build-arg=SOURCE_DATE_EPOCH=${SOURCE_DATE_EPOCH} --build-arg=pkgversion=${VERSION} -t localhost/bootc-pkg --target=build . + mkdir -p "${packages}" + rm -vf "${packages}"/*.rpm + podman run --rm localhost/bootc-pkg tar -C /out/ -cf - . | tar -C "${packages}"/ -xvf - + chmod a+rx target "${packages}" + chmod a+r "${packages}"/*.rpm + # Keep localhost/bootc-pkg for layer caching; use `just clean-local-images` to reclaim space # Build+test using the `composefs-sealeduki-sdboot` variant. test-composefs: just variant=composefs-sealeduki-sdboot test-tmt readonly local-upgrade-reboot # Only used by ci.yml right now -build-install-test-image: build-integration-test-image - cd hack && podman build {{base_buildargs}} -t {{integration_img}}-install -f Containerfile.drop-lbis +build-install-test-image: build + cd hack && podman build {{base_buildargs}} -t {{base_img}}-install -f Containerfile.drop-lbis # These tests accept the container image as input, and may spawn it. run-container-external-tests: @@ -182,43 +150,47 @@ validate: # # To run an individual test, pass it as an argument like: # `just test-tmt readonly` -# -# To run the integration tests, execute `just test-tmt integration` -test-tmt *ARGS: build-integration-test-image _build-upgrade-image +test-tmt *ARGS: build + @just _build-upgrade-image @just test-tmt-nobuild {{ARGS}} # Generate a local synthetic upgrade _build-upgrade-image: - cat tmt/tests/Dockerfile.upgrade | podman build -t {{integration_upgrade_img}}-bin --from={{integration_img}}-bin - - ./hack/build-sealed {{variant}} {{integration_upgrade_img}}-bin {{integration_upgrade_img}} {{sealed_buildargs}} + cat tmt/tests/Dockerfile.upgrade | podman build -t {{upgrade_img}}-bin --from={{base_img}}-bin - + ./hack/build-sealed {{variant}} {{upgrade_img}}-bin {{upgrade_img}} {{sealed_buildargs}} -# Assume the localhost/bootc-integration image is up to date, and just run tests. +# Assume the localhost/bootc image is up to date, and just run tests. # Useful for iterating on tests quickly. 
test-tmt-nobuild *ARGS: - cargo xtask run-tmt --env=BOOTC_variant={{variant}} --upgrade-image={{integration_upgrade_img}} {{integration_img}} {{ARGS}} + cargo xtask run-tmt --env=BOOTC_variant={{variant}} --upgrade-image={{upgrade_img}} {{base_img}} {{ARGS}} # Build test container image for testing on coreos with SKIP_CONFIGS=1, -# without configs and no curl container image -build-testimage-coreos PATH: - @just build-from-package {{PATH}} - cd hack && podman build {{base_buildargs}} --build-arg SKIP_CONFIGS=1 -t {{integration_img}}-coreos -f Containerfile . +# which skips LBIs, test kargs, and install configs that would conflict with FCOS. +build-testimage-coreos PATH: _keygen + #!/bin/bash + set -xeuo pipefail + pkg_path=$(realpath "{{PATH}}") + podman build --target=final -v "${pkg_path}":/run/packages:ro,z \ + --build-arg SKIP_CONFIGS=1 \ + -t {{base_img}}-coreos-bin {{buildargs}} . + ./hack/build-sealed {{variant}} {{base_img}}-coreos-bin {{base_img}}-coreos {{sealed_buildargs}} # Run test bootc install on FCOS -# BOOTC_target is `bootc-integration-coreos`, it will be used for bootc install. +# BOOTC_target is `bootc-coreos`, it will be used for bootc install. # Run `just build-testimage-coreos target/packages` to build test image firstly, # then run `just test-tmt-on-coreos plan-bootc-install-on-coreos` test-tmt-on-coreos *ARGS: - cargo xtask run-tmt --env=BOOTC_variant={{variant}} --env=BOOTC_target={{integration_img}}-coreos:latest {{fedora-coreos}} {{ARGS}} + cargo xtask run-tmt --env=BOOTC_variant={{variant}} --env=BOOTC_target={{base_img}}-coreos:latest {{fedora-coreos}} {{ARGS}} # Cleanup all test VMs created by tmt tests tmt-vm-cleanup: bcvk libvirt rm --stop --force --label bootc.test=1 # Run tests (unit and integration) that are containerized -test-container: build-units build-integration-test-image +test-container: build build-units podman run --rm --read-only localhost/bootc-units /usr/bin/bootc-units # Pass these through for cross-checking - podman run --rm --env=BOOTC_variant={{variant}} --env=BOOTC_base={{base}} {{integration_img}} bootc-integration-tests container + podman run --rm --env=BOOTC_variant={{variant}} --env=BOOTC_base={{base}} {{base_img}} bootc-integration-tests container # Remove all container images built (locally) via this Justfile, by matching a label clean-local-images: diff --git a/ci/Containerfile.install-fsverity b/ci/Containerfile.install-fsverity index a47c2964f..de585773c 100644 --- a/ci/Containerfile.install-fsverity +++ b/ci/Containerfile.install-fsverity @@ -1,5 +1,6 @@ # Enable fsverity at install time -FROM localhost/bootc +# Use bootc-install (not bootc) to avoid LBIs that can't be fetched in this test environment +FROM localhost/bootc-install RUN < /usr/lib/ostree/prepare-root.conf < OstreeTarWriter<'a, W> { // first thing. self.append_dir(rootpath, metadata)?; - // Now, we create sysroot/ and everything under it - self.write_repo_structure()?; + if !self.options.raw { + // Now, we create sysroot/ and everything under it + self.write_repo_structure()?; - self.append_commit_object()?; + self.append_commit_object()?; - // The ostree dirmeta object for the root. - self.append(ostree::ObjectType::DirMeta, metadata_checksum, &metadata_v)?; + // The ostree dirmeta object for the root. + self.append(ostree::ObjectType::DirMeta, metadata_checksum, &metadata_v)?; + } // Recurse and write everything else. 
self.append_dirtree( @@ -642,6 +644,35 @@ impl<'a, W: std::io::Write> OstreeTarWriter<'a, W> { } } +/// Append xattrs to the tar stream as PAX extensions, excluding security.selinux +/// which doesn't become visible in container runtimes anyway. +/// https://github.com/containers/storage/blob/0d4a8d2aaf293c9f0464b888d932ab5147a284b9/pkg/archive/archive.go#L85 +#[context("Writing tar xattrs")] +fn append_pax_xattrs( + out: &mut tar::Builder, + xattrs: &glib::Variant, +) -> Result<()> { + let v = xattrs.data_as_bytes(); + let v = v.try_as_aligned().unwrap(); + let v = gvariant::gv!("a(ayay)").cast(v); + let mut pax_extensions = Vec::new(); + for entry in v { + let (k, v) = entry.to_tuple(); + let k = CStr::from_bytes_with_nul(k).unwrap(); + let k = k + .to_str() + .with_context(|| format!("Found non-UTF8 xattr: {k:?}"))?; + if k == SECURITY_SELINUX_XATTR { + continue; + } + pax_extensions.push((format!("SCHILY.xattr.{k}"), v)); + } + if !pax_extensions.is_empty() { + out.append_pax_extensions(pax_extensions.iter().map(|(k, v)| (k.as_str(), *v)))?; + } + Ok(()) +} + /// Recursively walk an OSTree commit and generate data into a `[tar::Builder]` /// which contains all of the metadata objects, as well as a hardlinked /// stream that looks like a checkout. Extended attributes are stored specially out @@ -652,14 +683,141 @@ fn impl_export( out: &mut tar::Builder, options: ExportOptions, ) -> Result<()> { + if options.raw { + return impl_raw_export(repo, commit_checksum, out); + } let writer = &mut OstreeTarWriter::new(repo, commit_checksum, out, options)?; writer.write_commit()?; Ok(()) } +/// Export an ostree commit as a "raw" tar stream - just the filesystem content +/// with `/usr/etc` -> `/etc` remapping, without ostree repository structure. +fn impl_raw_export( + repo: &ostree::Repo, + commit_checksum: &str, + out: &mut tar::Builder, +) -> Result<()> { + let cancellable = gio::Cancellable::NONE; + let (root, _) = repo.read_commit(commit_checksum, cancellable)?; + let root = root + .downcast::() + .expect("read_commit returns RepoFile"); + root.ensure_resolved()?; + raw_export_dir(repo, out, &root, Utf8Path::new("")) +} + +/// Recursively export a directory for raw export mode. +fn raw_export_dir( + repo: &ostree::Repo, + out: &mut tar::Builder, + dir: &ostree::RepoFile, + path: &Utf8Path, +) -> Result<()> { + let cancellable = gio::Cancellable::NONE; + let queryattrs = "standard::name,standard::type"; + let queryflags = gio::FileQueryInfoFlags::NOFOLLOW_SYMLINKS; + let e = dir.enumerate_children(queryattrs, queryflags, cancellable)?; + + while let Some(info) = e.next_file(cancellable)? 
{ + let name = info.name(); + let name = name + .to_str() + .ok_or_else(|| anyhow!("Invalid UTF-8 filename: {:?}", name))?; + let child_path = path.join(name); + + // Apply /usr/etc -> /etc remapping + let output_path = map_path_v1(&child_path); + + // Get the child and downcast to RepoFile + let child = dir.child(name); + let child = child + .downcast::() + .expect("child of RepoFile is RepoFile"); + child.ensure_resolved()?; + + let file_type = info.file_type(); + match file_type { + gio::FileType::Regular | gio::FileType::SymbolicLink => { + // Get the checksum and load the file via the repo + let checksum = child.checksum(); + let (instream, meta, xattrs) = repo.load_file(&checksum, cancellable)?; + + // Write xattrs as PAX extensions (before the file entry) + append_pax_xattrs(out, &xattrs)?; + + let mut h = tar::Header::new_gnu(); + h.set_uid(meta.attribute_uint32("unix::uid") as u64); + h.set_gid(meta.attribute_uint32("unix::gid") as u64); + // Filter out the file type bits from mode for tar + h.set_mode(meta.attribute_uint32("unix::mode") & !libc::S_IFMT); + + if let Some(instream) = instream { + // Regular file + h.set_entry_type(tar::EntryType::Regular); + h.set_size(meta.size() as u64); + let mut instream = BufReader::with_capacity(BUF_CAPACITY, instream.into_read()); + out.append_data(&mut h, output_path, &mut instream) + .with_context(|| format!("Writing {child_path}"))?; + } else { + // Symlink + h.set_entry_type(tar::EntryType::Symlink); + h.set_size(0); + + let target = meta + .symlink_target() + .ok_or_else(|| anyhow!("Missing symlink target for {child_path}"))?; + let target = target + .to_str() + .ok_or_else(|| anyhow!("Invalid UTF-8 symlink target: {target:?}"))?; + + // Handle "denormal" symlinks that contain "//" + if symlink_is_denormal(target) { + h.set_link_name_literal(target) + .with_context(|| format!("Setting symlink target for {child_path}"))?; + out.append_data(&mut h, output_path, &mut std::io::empty()) + .with_context(|| format!("Writing symlink {child_path}"))?; + } else { + out.append_link(&mut h, output_path, target) + .with_context(|| format!("Writing symlink {child_path}"))?; + } + } + } + gio::FileType::Directory => { + // For directories, query metadata directly from the RepoFile + let dir_meta_checksum = child.tree_get_metadata_checksum().ok_or_else(|| { + anyhow!("Missing metadata checksum for directory {child_path}") + })?; + let meta_v = repo.load_variant(ostree::ObjectType::DirMeta, &dir_meta_checksum)?; + let metadata = + ostree::DirMetaParsed::from_variant(&meta_v).context("Parsing dirmeta")?; + + let mut h = tar::Header::new_gnu(); + h.set_entry_type(tar::EntryType::Directory); + h.set_uid(metadata.uid as u64); + h.set_gid(metadata.gid as u64); + h.set_mode(metadata.mode & !libc::S_IFMT); + h.set_size(0); + out.append_data(&mut h, output_path, std::io::empty()) + .with_context(|| format!("Writing directory {child_path}"))?; + + raw_export_dir(repo, out, &child, &child_path)?; + } + o => anyhow::bail!("Unsupported file type {o:?} for {child_path}"), + } + } + Ok(()) +} + /// Configuration for tar export. #[derive(Debug, PartialEq, Eq, Default)] -pub struct ExportOptions; +pub struct ExportOptions { + /// If true, output a "raw" filesystem tree without the ostree repository + /// structure (no /sysroot/ostree/repo, no commit/dirtree/dirmeta objects, + /// no hardlinks into the object store). The `/usr/etc` -> `/etc` remapping + /// is still performed. + pub raw: bool, +} /// Export an ostree commit to an (uncompressed) tar archive stream. 
#[context("Exporting commit")] @@ -719,7 +877,7 @@ pub(crate) fn export_chunk( ) -> Result<()> { // For chunking, we default to format version 1 #[allow(clippy::needless_update)] - let opts = ExportOptions; + let opts = ExportOptions::default(); let writer = &mut OstreeTarWriter::new(repo, commit, out, opts)?; writer.write_repo_structure()?; write_chunk(writer, chunk, create_parent_dirs) @@ -734,7 +892,7 @@ pub(crate) fn export_final_chunk( out: &mut tar::Builder, create_parent_dirs: bool, ) -> Result<()> { - let options = ExportOptions; + let options = ExportOptions::default(); let writer = &mut OstreeTarWriter::new(repo, commit_checksum, out, options)?; // For the final chunk, output the commit object, plus all ostree metadata objects along with // the containing directories. diff --git a/crates/ostree-ext/tests/it/main.rs b/crates/ostree-ext/tests/it/main.rs index 2dfb77970..fb2ecbac6 100644 --- a/crates/ostree-ext/tests/it/main.rs +++ b/crates/ostree-ext/tests/it/main.rs @@ -814,6 +814,120 @@ async fn test_export_as_container_derived() -> Result<()> { Ok(()) } +/// Verify that when we export a container image, /etc content is properly +/// remapped from /usr/etc back to /etc in the exported OCI layers. +/// This is a regression test for https://github.com/bootc-dev/bootc/issues/1864 +#[tokio::test] +async fn test_export_etc_remapping() -> Result<()> { + if !check_skopeo() { + return Ok(()); + } + let fixture = Fixture::new_v1()?; + // Export into an OCI directory + let src_imgref = fixture.export_container().await.unwrap().0; + + // Build a derived image with /etc content + let derived_path = &fixture.path.join("derived.oci"); + let srcpath = src_imgref.name.as_str(); + oci_clone(srcpath, derived_path).await.unwrap(); + let test_etc_content = "test etc content for export"; + ostree_ext::integrationtest::generate_derived_oci_from_tar( + derived_path, + |w| { + let mut layer_tar = tar::Builder::new(w); + let mut h = tar::Header::new_gnu(); + h.set_uid(0); + h.set_gid(0); + h.set_size(0); + h.set_mode(0o755); + h.set_entry_type(tar::EntryType::Directory); + layer_tar.append_data(&mut h.clone(), "etc", &mut std::io::empty())?; + h.set_mode(0o644); + h.set_size(test_etc_content.len().try_into().unwrap()); + h.set_entry_type(tar::EntryType::Regular); + layer_tar.append_data( + &mut h.clone(), + "etc/export-test.conf", + std::io::Cursor::new(test_etc_content.as_bytes()), + )?; + layer_tar.finish()?; + Ok(()) + }, + None, + None, + )?; + + let derived_imgref = ImageReference { + transport: Transport::OciDir, + name: derived_path.to_string(), + }; + + // Import the derived image into the ostree store + let initimport = fixture.must_import(&derived_imgref).await?; + + // Verify the file is in /usr/etc in the ostree commit (as expected) + { + let r = fixture + .destrepo() + .read_commit(&initimport.merge_commit, gio::Cancellable::NONE)? 
+ .0; + let testfile = r.resolve_relative_path("usr/etc/export-test.conf"); + let testfile = testfile.downcast_ref::().unwrap(); + testfile.ensure_resolved()?; + } + + // Export it via store::export + let exported_ocidir_name = "exported.ocidir"; + let dest = ImageReference { + transport: Transport::OciDir, + name: format!("{}:exported-test", fixture.path.join(exported_ocidir_name)), + }; + fixture.dir.create_dir(exported_ocidir_name)?; + let ocidir = ocidir::OciDir::ensure(fixture.dir.open_dir(exported_ocidir_name)?)?; + let _exported = store::export(fixture.destrepo(), &derived_imgref, &dest, None) + .await + .unwrap(); + + // Now verify the exported image has /etc/export-test.conf (not /usr/etc/export-test.conf) + let idx = ocidir.read_index()?; + let desc = idx.manifests().first().unwrap(); + let manifest: oci_image::ImageManifest = ocidir.read_json_blob(desc).unwrap(); + + // Check all layers for our test file + let mut found_etc_file = false; + let mut found_usr_etc_file = false; + for layer in manifest.layers() { + let mut blob = ocidir + .read_blob(layer) + .map(BufReader::new) + .map(flate2::read::GzDecoder::new) + .map(tar::Archive::new)?; + for entry in blob.entries()? { + let entry = entry?; + let path = entry.path()?; + let path_str = path.to_string_lossy(); + if path_str == "etc/export-test.conf" { + found_etc_file = true; + } + if path_str == "usr/etc/export-test.conf" { + found_usr_etc_file = true; + } + } + } + + // The file should be in /etc, not /usr/etc + assert!( + found_etc_file, + "Expected /etc/export-test.conf in exported image, found_usr_etc_file={found_usr_etc_file}" + ); + assert!( + !found_usr_etc_file, + "Did not expect /usr/etc/export-test.conf in exported image" + ); + + Ok(()) +} + #[tokio::test] async fn test_unencapsulate_unbootable() -> Result<()> { if !check_skopeo() { diff --git a/crates/tests-integration/src/install.rs b/crates/tests-integration/src/install.rs index 8487c0354..66db08bc6 100644 --- a/crates/tests-integration/src/install.rs +++ b/crates/tests-integration/src/install.rs @@ -38,7 +38,7 @@ fn delete_ostree_deployments(sh: &Shell, image: &str) -> Result<(), anyhow::Erro if !Path::new("/ostree/deploy/").exists() { return Ok(()); } - let mounts = &["-v", "/ostree:/ostree", "-v", "/boot:/boot"]; + let mounts = &["-v", "/ostree:/sysroot/ostree", "-v", "/boot:/boot"]; cmd!( sh, "sudo {BASE_ARGS...} {mounts...} {image} bootc state wipe-ostree" diff --git a/crates/xtask/src/buildsys.rs b/crates/xtask/src/buildsys.rs new file mode 100644 index 000000000..9f26a288a --- /dev/null +++ b/crates/xtask/src/buildsys.rs @@ -0,0 +1,165 @@ +//! Build system validation checks. + +use std::collections::BTreeMap; + +use anyhow::{Context, Result}; +use camino::{Utf8Path, Utf8PathBuf}; +use fn_error_context::context; +use xshell::{cmd, Shell}; + +const DOCKERFILE_NETWORK_CUTOFF: &str = "external dependency cutoff point"; + +/// Check build system properties +/// +/// - Reproducible builds for the RPM +/// - Dockerfile network isolation after cutoff point +#[context("Checking build system")] +pub fn check_buildsys(sh: &Shell, dockerfile_path: &Utf8Path) -> Result<()> { + check_package_reproducibility(sh)?; + check_dockerfile_network_isolation(dockerfile_path)?; + Ok(()) +} + +/// Verify that consecutive `just package` invocations produce identical RPM checksums. 
+#[context("Checking package reproducibility")] +fn check_package_reproducibility(sh: &Shell) -> Result<()> { + println!("Checking reproducible builds..."); + // Helper to compute SHA256 of bootc RPMs in target/packages/ + fn get_rpm_checksums(sh: &Shell) -> Result> { + // Find bootc*.rpm files in target/packages/ + let packages_dir = Utf8Path::new("target/packages"); + let mut rpm_files: Vec = Vec::new(); + for entry in std::fs::read_dir(packages_dir).context("Reading target/packages")? { + let entry = entry?; + let path = Utf8PathBuf::try_from(entry.path())?; + if path.extension() == Some("rpm") { + rpm_files.push(path); + } + } + + assert!(!rpm_files.is_empty()); + + let mut checksums = BTreeMap::new(); + for rpm_path in &rpm_files { + let output = cmd!(sh, "sha256sum {rpm_path}").read()?; + let (hash, filename) = output + .split_once(" ") + .with_context(|| format!("failed to parse sha256sum output: '{}'", output))?; + checksums.insert(filename.to_owned(), hash.to_owned()); + } + Ok(checksums) + } + + cmd!(sh, "just package").run()?; + let first_checksums = get_rpm_checksums(sh)?; + cmd!(sh, "just package").run()?; + let second_checksums = get_rpm_checksums(sh)?; + + itertools::assert_equal(first_checksums, second_checksums); + println!("ok package reproducibility"); + + Ok(()) +} + +/// Verify that all RUN instructions in the Dockerfile after the network cutoff +/// point include `--network=none`. +#[context("Checking Dockerfile network isolation")] +fn check_dockerfile_network_isolation(dockerfile_path: &Utf8Path) -> Result<()> { + println!("Checking Dockerfile network isolation..."); + let dockerfile = std::fs::read_to_string(dockerfile_path).context("Reading Dockerfile")?; + verify_dockerfile_network_isolation(&dockerfile)?; + println!("ok Dockerfile network isolation"); + Ok(()) +} + +const RUN_NETWORK_NONE: &str = "RUN --network=none"; + +/// Verify that all RUN instructions after the network cutoff marker start with +/// `RUN --network=none`. +/// +/// Returns Ok(()) if all RUN instructions comply, or an error listing violations. 
+pub fn verify_dockerfile_network_isolation(dockerfile: &str) -> Result<()> { + // Find the cutoff point + let cutoff_line = dockerfile + .lines() + .position(|line| line.contains(DOCKERFILE_NETWORK_CUTOFF)) + .ok_or_else(|| { + anyhow::anyhow!( + "Dockerfile missing '{}' marker comment", + DOCKERFILE_NETWORK_CUTOFF + ) + })?; + + // Check all RUN instructions after the cutoff point + let mut errors = Vec::new(); + + for (idx, line) in dockerfile.lines().enumerate().skip(cutoff_line + 1) { + let line_num = idx + 1; // 1-based line numbers + let trimmed = line.trim(); + + // Check if this is a RUN instruction + if trimmed.starts_with("RUN ") { + // Must start with exactly "RUN --network=none" + if !trimmed.starts_with(RUN_NETWORK_NONE) { + errors.push(format!( + " line {}: RUN instruction must start with `{}`", + line_num, RUN_NETWORK_NONE + )); + } + } + } + + if !errors.is_empty() { + anyhow::bail!( + "Dockerfile has RUN instructions after '{}' that don't start with `{}`:\n{}", + DOCKERFILE_NETWORK_CUTOFF, + RUN_NETWORK_NONE, + errors.join("\n") + ); + } + + Ok(()) +} + +#[cfg(test)] +mod tests { + use super::*; + + #[test] + fn test_network_isolation_valid() { + let dockerfile = r#" +FROM base +RUN echo "before cutoff, no network restriction needed" +# external dependency cutoff point +RUN --network=none echo "good" +RUN --network=none --mount=type=bind,from=foo,target=/bar some-command +"#; + verify_dockerfile_network_isolation(dockerfile).unwrap(); + } + + #[test] + fn test_network_isolation_missing_flag() { + let dockerfile = r#" +FROM base +# external dependency cutoff point +RUN --network=none echo "good" +RUN echo "bad - missing network flag" +"#; + let err = verify_dockerfile_network_isolation(dockerfile).unwrap_err(); + let msg = err.to_string(); + assert!(msg.contains("line 5"), "error should mention line 5: {msg}"); + } + + #[test] + fn test_network_isolation_wrong_position() { + // --network=none must come immediately after RUN + let dockerfile = r#" +FROM base +# external dependency cutoff point +RUN --mount=type=bind,from=foo,target=/bar --network=none echo "bad" +"#; + let err = verify_dockerfile_network_isolation(dockerfile).unwrap_err(); + let msg = err.to_string(); + assert!(msg.contains("line 4"), "error should mention line 4: {msg}"); + } +} diff --git a/crates/xtask/src/tmt.rs b/crates/xtask/src/tmt.rs index 682381a7d..0e740fd26 100644 --- a/crates/xtask/src/tmt.rs +++ b/crates/xtask/src/tmt.rs @@ -324,9 +324,12 @@ pub(crate) fn run_tmt(sh: &Shell, args: &RunTmtArgs) -> Result<()> { // Workaround for https://github.com/bootc-dev/bcvk/issues/174 // Save the container image to tar, this will be synced to tested OS if variant_id == "coreos" { - cmd!(sh, "podman save -q -o {workdir}/tmt/tests/bootc.tar localhost/bootc-integration-coreos:latest") - .run() - .with_context(|| format!("Saving container image to tar"))?; + cmd!( + sh, + "podman save -q -o {workdir}/tmt/tests/bootc.tar localhost/bootc-coreos:latest" + ) + .run() + .with_context(|| format!("Saving container image to tar"))?; } // Change to workdir for running tmt commands diff --git a/crates/xtask/src/xtask.rs b/crates/xtask/src/xtask.rs index 203a2f3de..6921a681f 100644 --- a/crates/xtask/src/xtask.rs +++ b/crates/xtask/src/xtask.rs @@ -14,6 +14,7 @@ use clap::{Args, Parser, Subcommand}; use fn_error_context::context; use xshell::{cmd, Shell}; +mod buildsys; mod man; mod tmt; @@ -58,7 +59,7 @@ enum Commands { /// Arguments for run-tmt command #[derive(Debug, Args)] pub(crate) struct RunTmtArgs { - /// Image 
name (e.g., "localhost/bootc-integration") + /// Image name (e.g., "localhost/bootc") pub(crate) image: String, /// Test plan filters (e.g., "readonly") @@ -73,7 +74,7 @@ pub(crate) struct RunTmtArgs { #[clap(long)] pub(crate) env: Vec, - /// Upgrade image to use when bind-storage-ro is available (e.g., localhost/bootc-integration-upgrade) + /// Upgrade image to use when bind-storage-ro is available (e.g., localhost/bootc-upgrade) #[clap(long)] pub(crate) upgrade_image: Option, @@ -85,7 +86,7 @@ pub(crate) struct RunTmtArgs { /// Arguments for tmt-provision command #[derive(Debug, Args)] pub(crate) struct TmtProvisionArgs { - /// Image name (e.g., "localhost/bootc-integration") + /// Image name (e.g., "localhost/bootc") pub(crate) image: String, /// VM name (defaults to "bootc-tmt-manual-") @@ -137,7 +138,7 @@ fn try_main() -> Result<()> { Commands::Spec => spec(&sh), Commands::RunTmt(args) => tmt::run_tmt(&sh, &args), Commands::TmtProvision(args) => tmt::tmt_provision(&sh, &args), - Commands::CheckBuildsys => check_buildsys(&sh), + Commands::CheckBuildsys => buildsys::check_buildsys(&sh, "Dockerfile".into()), } } @@ -405,48 +406,3 @@ fn update_generated(sh: &Shell) -> Result<()> { Ok(()) } - -/// Check build system properties -/// -/// - Reproducible builds for the RPM -#[context("Checking build system")] -fn check_buildsys(sh: &Shell) -> Result<()> { - use std::collections::BTreeMap; - - println!("Checking reproducible builds..."); - // Helper to compute SHA256 of bootc RPMs in target/packages/ - fn get_rpm_checksums(sh: &Shell) -> Result> { - // Find bootc*.rpm files in target/packages/ - let packages_dir = Utf8Path::new("target/packages"); - let mut rpm_files: Vec = Vec::new(); - for entry in std::fs::read_dir(packages_dir).context("Reading target/packages")? { - let entry = entry?; - let path = Utf8PathBuf::try_from(entry.path())?; - if path.extension() == Some("rpm") { - rpm_files.push(path); - } - } - - assert!(!rpm_files.is_empty()); - - let mut checksums = BTreeMap::new(); - for rpm_path in &rpm_files { - let output = cmd!(sh, "sha256sum {rpm_path}").read()?; - let (hash, filename) = output - .split_once(" ") - .with_context(|| format!("failed to parse sha256sum output: '{}'", output))?; - checksums.insert(filename.to_owned(), hash.to_owned()); - } - Ok(checksums) - } - - cmd!(sh, "just package").run()?; - let first_checksums = get_rpm_checksums(sh)?; - cmd!(sh, "just package").run()?; - let second_checksums = get_rpm_checksums(sh)?; - - itertools::assert_equal(first_checksums, second_checksums); - println!("ok package reproducibility"); - - Ok(()) -} diff --git a/hack/Containerfile b/hack/Containerfile deleted file mode 100644 index 5ec8ab7ff..000000000 --- a/hack/Containerfile +++ /dev/null @@ -1,43 +0,0 @@ -# Build a container image that has extra testing stuff in it, such -# as nushell, some preset logically bound images, etc. This expects -# to create an image derived FROM localhost/bootc which was created -# by the Dockerfile at top. - -FROM scratch as context -# We only need this stuff in the initial context -COPY . / - -# An intermediate layer which caches the extended RPMS -FROM localhost/bootc as extended -# And this layer has additional stuff for testing, such as nushell etc. -RUN --mount=type=bind,from=context,target=/run/context <}" -if [ -n "${SKIP_CONFIGS:-}" ]; then - echo "Skipping configs installation" - exit 0 -fi -set -xeuo pipefail -cd /run/context -# For test-22-logically-bound-install -cp -a lbi/usr/. 
/usr -for x in curl.container curl-base.image podman.image; do - ln -s /usr/share/containers/systemd/$x /usr/lib/bootc/bound-images.d/$x -done - -# Add some testing kargs into our dev builds -install -D -t /usr/lib/bootc/kargs.d test-kargs/* -# Also copy in some default install configs we use for testing -install -D -t /usr/lib/bootc/install/ install-test-configs/* -# Finally, test our own linting -bootc container lint --fatal-warnings -EORUN diff --git a/hack/Containerfile.drop-lbis b/hack/Containerfile.drop-lbis index 4f4891d2e..ecf2a84fe 100644 --- a/hack/Containerfile.drop-lbis +++ b/hack/Containerfile.drop-lbis @@ -1,3 +1,3 @@ -FROM localhost/bootc-integration +FROM localhost/bootc # Workaround for https://github.com/bootc-dev/bootc/issues/1618 RUN rm -rf /usr/lib/bootc/bound-images.d/* diff --git a/hack/Containerfile.packit b/hack/Containerfile.packit index 87071ed11..2e6e6e40b 100644 --- a/hack/Containerfile.packit +++ b/hack/Containerfile.packit @@ -31,17 +31,6 @@ dnf -y update bootc dnf -y install audit ./provision-derived.sh cloudinit -# For test-22-logically-bound-install -cp -a lbi/usr/. /usr -for x in curl.container curl-base.image podman.image; do - ln -s /usr/share/containers/systemd/$x /usr/lib/bootc/bound-images.d/$x -done - -# Add some testing kargs into our dev builds -install -D -t /usr/lib/bootc/kargs.d test-kargs/* -# Also copy in some default install configs we use for testing -install -D -t /usr/lib/bootc/install/ install-test-configs/* - # Remove bootc repo, bootc updated already rm -rf /var/share/test-artifacts /etc/yum.repos.d/test-artifacts.repo # Clean up dnf diff --git a/hack/provision-derived.sh b/hack/provision-derived.sh index af5a12733..029f9fc4f 100755 --- a/hack/provision-derived.sh +++ b/hack/provision-derived.sh @@ -55,8 +55,8 @@ if test $cloudinit = 1; then dnf -y install cloud-init ln -s ../cloud-init.target /usr/lib/systemd/system/default.target.wants # Allow root SSH login for testing with bcvk/tmt -mkdir -p /etc/cloud/cloud.cfg.d -cat > /etc/cloud/cloud.cfg.d/80-enable-root.cfg <<'CLOUDEOF' + mkdir -p /etc/cloud/cloud.cfg.d + cat > /etc/cloud/cloud.cfg.d/80-enable-root.cfg <<'CLOUDEOF' # Enable root login for testing disable_root: false @@ -122,3 +122,20 @@ d /var/lib/dhclient 0755 root root - - EOF rm -rf /var/lib/dhclient fi + +# The following configs are skipped when SKIP_CONFIGS=1, which is used +# for testing bootc install on Fedora CoreOS where these would conflict. +if test -z "${SKIP_CONFIGS:-}"; then + # For test-22-logically-bound-install + cp -a lbi/usr/. 
/usr + for x in curl.container curl-base.image podman.image; do + ln -s /usr/share/containers/systemd/$x /usr/lib/bootc/bound-images.d/$x + done + + # Add some testing kargs into our dev builds + install -D -t /usr/lib/bootc/kargs.d test-kargs/* + # Also copy in some default install configs we use for testing + install -D -t /usr/lib/bootc/install/ install-test-configs/* +else + echo "SKIP_CONFIGS is set, skipping LBIs, test kargs, and install configs" +fi diff --git a/hack/provision-packit.sh b/hack/provision-packit.sh index 7f1848bf8..9ec9a144a 100755 --- a/hack/provision-packit.sh +++ b/hack/provision-packit.sh @@ -86,7 +86,7 @@ cp /etc/yum.repos.d/test-artifacts.repo "$BOOTC_TEMPDIR" ls -al "$BOOTC_TEMPDIR" # Do not use just because it's only available on Fedora, not on CS and RHEL -podman build --jobs=4 --from "$BASE" -v "$BOOTC_TEMPDIR":/bootc-test:z -t localhost/bootc-integration -f "${BOOTC_TEMPDIR}/Containerfile.packit" "$BOOTC_TEMPDIR" +podman build --jobs=4 --from "$BASE" -v "$BOOTC_TEMPDIR":/bootc-test:z -t localhost/bootc -f "${BOOTC_TEMPDIR}/Containerfile.packit" "$BOOTC_TEMPDIR" # Keep these in sync with what's used in hack/lbi podman pull -q --retry 5 --retry-delay 5s quay.io/curl/curl:latest quay.io/curl/curl-base:latest registry.access.redhat.com/ubi9/podman:latest diff --git a/hack/system-reinstall-bootc.exp b/hack/system-reinstall-bootc.exp index 760033095..54effbd74 100755 --- a/hack/system-reinstall-bootc.exp +++ b/hack/system-reinstall-bootc.exp @@ -3,7 +3,7 @@ # Set a timeout set timeout 600 -spawn system-reinstall-bootc localhost/bootc-integration +spawn system-reinstall-bootc localhost/bootc expect { "Then you can login as * using those keys. \\\[Y/n\\\]" { diff --git a/tmt/tests/Dockerfile.upgrade b/tmt/tests/Dockerfile.upgrade index ab3b73c7c..a9e36ba50 100644 --- a/tmt/tests/Dockerfile.upgrade +++ b/tmt/tests/Dockerfile.upgrade @@ -1,3 +1,3 @@ # Just creates a file as a new layer for a synthetic upgrade test -FROM localhost/bootc-integration +FROM localhost/bootc RUN touch --reference=/usr/bin/bash /usr/share/testing-bootc-upgrade-apply