Skip to content
This repository has been archived by the owner on Jun 4, 2021. It is now read-only.

Commit

Permalink
Merge pull request #102 from google/upstream-1535145266
Browse files Browse the repository at this point in the history
Fix rules_docker #474, fix bug in LayerSource, change tool's name from digest.par to digester.par
  • Loading branch information
jonjohnsonjr authored Aug 24, 2018
2 parents 03ec6a3 + 6dde789 commit b2caa39
Show file tree
Hide file tree
Showing 9 changed files with 263 additions and 27 deletions.
20 changes: 10 additions & 10 deletions BUILD.bazel
Original file line number Diff line number Diff line change
Expand Up @@ -73,9 +73,9 @@ par_binary(
)

# The digester tool: computes an image digest without pushing.
# (Renamed from "digest" to "digester" in this change; the diff view had
# left both the old and new attribute lines, which is invalid Starlark —
# keep only the post-rename attributes.)
par_binary(
    name = "digester",
    srcs = ["tools/image_digester_.py"],
    main = "tools/image_digester_.py",
    visibility = ["//visibility:public"],
    deps = [":containerregistry"],
)
Expand All @@ -85,7 +85,7 @@ sh_test(
size = "large",
srcs = ["appender_test.sh"],
data = [
"testenv.sh",
":testenv.sh",
":appender.par",
],
)
Expand All @@ -95,7 +95,7 @@ sh_test(
size = "large",
srcs = ["puller_test.sh"],
data = [
"testenv.sh",
":testenv.sh",
":puller.par",
],
)
Expand All @@ -105,18 +105,18 @@ sh_test(
size = "large",
srcs = ["pusher_test.sh"],
data = [
"testenv.sh",
":testenv.sh",
":pusher.par",
],
)

# Integration test for the digester: pushes an image with pusher.par and
# checks digester.par computes the same digest.
# (The diff view had left both old "digest_test" and new "digester_test"
# lines interleaved — keep only the post-rename rule.)
sh_test(
    name = "digester_test",
    size = "large",
    srcs = ["digester_test.sh"],
    data = [
        ":testenv.sh",
        ":digester.par",
        ":pusher.par",
    ],
)
2 changes: 1 addition & 1 deletion client/v1/save_.py
Original file line number Diff line number Diff line change
Expand Up @@ -60,7 +60,7 @@ def add_file(filename, contents):

for layer_id in image.ancestry(image.top()):
# Add each layer_id exactly once.
if layer_id in seen:
if layer_id in seen or json.loads(image.json(layer_id)).get('throwaway'):
continue
seen.add(layer_id)

Expand Down
29 changes: 22 additions & 7 deletions client/v2_2/docker_image_.py
Original file line number Diff line number Diff line change
Expand Up @@ -695,6 +695,22 @@ def __init__(self,
with FromTarball(legacy_base) as base:
self._legacy_base = base

def _get_foreign_layers(self):
foreign_layers = []
if self._foreign_layers_manifest:
manifest = json.loads(self._foreign_layers_manifest)
if 'layers' in manifest:
for layer in manifest['layers']:
if layer['mediaType'] == docker_http.FOREIGN_LAYER_MIME:
foreign_layers.append(layer)
return foreign_layers

def _get_foreign_layer_by_digest(self, digest):
for foreign_layer in self._get_foreign_layers():
if foreign_layer['digest'] == digest:
return foreign_layer
return None

def _populate_manifest(self):
base_layers = []
if self._legacy_base:
Expand All @@ -703,11 +719,7 @@ def _populate_manifest(self):
# Manifest files found in tar files are actually a json list.
# This code iterates through that collection and appends any foreign
# layers described in the order found in the config file.
manifest = json.loads(self._foreign_layers_manifest)
if 'layers' in manifest:
for layer in manifest['layers']:
if layer['mediaType'] == docker_http.FOREIGN_LAYER_MIME:
base_layers.append(layer)
base_layers += self._get_foreign_layers()

# TODO(user): Update mimes here for oci_compat.
self._manifest = json.dumps(
Expand Down Expand Up @@ -747,8 +759,11 @@ def config_file(self):
def uncompressed_blob(self, digest):
  """Override.

  Returns the uncompressed contents of the layer with `digest`. Foreign
  layers carry no local payload, so they yield empty bytes; any other
  digest not stored on disk falls through to the legacy base tarball.

  (The diff view interleaved the pre- and post-change lines here, leaving
  an unreachable branch — this is the post-change body.)
  """
  if digest not in self._layer_to_filename:
    if self._get_foreign_layer_by_digest(digest):
      # Foreign layers are fetched from their own URLs, not from disk.
      return bytes([])
    else:
      # Leverage the FromTarball fast-path.
      return self._legacy_base.uncompressed_blob(digest)
  return super(FromDisk, self).uncompressed_blob(digest)

def uncompressed_layer(self, diff_id):
Expand Down
9 changes: 6 additions & 3 deletions client/v2_2/docker_session_.py
Original file line number Diff line number Diff line change
Expand Up @@ -205,7 +205,9 @@ def _put_blob(self, image, digest):
# repository that is known to contain this blob and skips the upload.
self._patch_upload(image, digest)

def _remote_tag_digest(self):
def _remote_tag_digest(
self, image
):
"""Check the remote for the given manifest by digest."""

# GET the tag we're pushing
Expand All @@ -216,7 +218,8 @@ def _remote_tag_digest(self):
method='GET',
accepted_codes=[
six.moves.http_client.OK, six.moves.http_client.NOT_FOUND
])
],
accepted_mimes=[image.media_type()])

if resp.status == six.moves.http_client.NOT_FOUND: # pytype: disable=attribute-error
return None
Expand Down Expand Up @@ -293,7 +296,7 @@ def upload(self,
# checks (they must exist).
if self._manifest_exists(image):
if isinstance(self._name, docker_name.Tag):
if self._remote_tag_digest() == image.digest():
if self._remote_tag_digest(image) == image.digest():
logging.info('Tag points to the right manifest, skipping push.')
return
logging.info('Manifest exists, skipping blob uploads and pushing tag.')
Expand Down
10 changes: 6 additions & 4 deletions client/v2_2/save_.py
Original file line number Diff line number Diff line change
Expand Up @@ -101,7 +101,8 @@ def add_file(filename, contents):
# We don't just exclude the empty tar because we leave its diff_id
# in the set when coming through v2_compat.V22FromV2
for layer_id in reversed(v1_img.ancestry(v1_img.top()))
if _diff_id(v1_img, layer_id) in diffs
if _diff_id(v1_img, layer_id) in diffs and
not json.loads(v1_img.json(layer_id)).get('throwaway')
],
'RepoTags': [str(tag)]
}
Expand All @@ -110,9 +111,10 @@ def add_file(filename, contents):
input_manifest = json.loads(image.manifest())
input_layers = input_manifest['layers']

for i, diff_id in enumerate(diffs):
if input_layers[i]['mediaType'] == docker_http.FOREIGN_LAYER_MIME:
layer_sources[diff_id] = input_layers[i]
for input_layer in input_layers:
if input_layer['mediaType'] == docker_http.FOREIGN_LAYER_MIME:
diff_id = image.digest_to_diff_id(input_layer['digest'])
layer_sources[diff_id] = input_layer

if layer_sources:
manifest['LayerSources'] = layer_sources
Expand Down
33 changes: 33 additions & 0 deletions digester.yaml
Original file line number Diff line number Diff line change
@@ -0,0 +1,33 @@
steps:
# Build the pusher PAR file
- name: gcr.io/cloud-builders/bazel
args: [
'build', '//:pusher.par',
'--strategy', 'PythonCompile=standalone'
]

# Upload the pusher PAR file to a public GCS bucket
- name: gcr.io/cloud-builders/gsutil
args: [
'cp',
'bazel-bin/pusher.par',
'gs://containerregistry-releases/$TAG_NAME/pusher.par'
]

# Build the digester PAR file
- name: gcr.io/cloud-builders/bazel
args: [
'build', '//:digester.par',
'--strategy', 'PythonCompile=standalone'
]

# Upload the digester PAR file to a public GCS bucket
- name: gcr.io/cloud-builders/gsutil
args: [
'cp',
'bazel-bin/digester.par',
'gs://containerregistry-releases/$TAG_NAME/digester.par'
]

# We produce no Docker images.
images: []
68 changes: 68 additions & 0 deletions digester_test.sh
Original file line number Diff line number Diff line change
@@ -0,0 +1,68 @@
#!/bin/bash -e

# Copyright 2017 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

# Unit tests for digester.par

# Trick to chase the symlink before the docker build.
# NOTE(review): digester2.par / pusher2.par are not referenced later in
# this script — presumably consumed by an external harness; confirm.
cp digester.par digester2.par
cp pusher.par pusher2.par

# Generate a fresh random image to avoid completely
# incremental pushes.
#
# $1: path of the tarball to write the saved image to.
function generate_image() {
  local target=$1

  # Build a throwaway image whose layer contains 100 random bytes, save
  # it to ${target} as a docker tarball, then remove the local tag.
  cat > Dockerfile <<EOF
FROM alpine
RUN head -c100 /dev/urandom > /tmp/random.txt
EOF
  docker build -t random .
  docker save -o "${target}" random
  docker rmi -f random
}

# Test pushing an image by just invoking the digester
#
# $1: the image reference (repository:tag) to push to.
#
# Pushes a freshly generated tarball with pusher.par, then checks that
# digester.par computes the same digest the registry reported.
function test_digester() {
  local image="$1"
  local random_image="direct.$RANDOM.tar"
  local output_file="digest.txt"
  generate_image "${random_image}"

  # Test it in our current environment.
  # Output has following format: {image} was published with digest: sha256:...
  output="$(pusher.par --name="${image}" --tarball="${random_image}")"
  # Keep only the last whitespace-separated word (the sha256:... digest);
  # the former `$(echo ...)` wrapper was a no-op.
  push_digest="${output##* }"

  digester.par --tarball="${random_image}" --output-digest="${output_file}"
  # Quote the file name so an unexpected IFS cannot split it.
  digest="$(cat "${output_file}")"
  if [ "${push_digest}" != "${digest}" ]; then
    echo "Digests don't match."
    exit 1
  fi
}

# Run the digester round-trip test against a single image reference.
#
# $1: the image reference to test.
function test_image() {
  local image="$1"

  echo "TESTING: ${image}"
  test_digester "${image}"
}


# Test pushing a trivial image.
# The registered credential only has access to this repository,
# which is only used for testing.
test_image gcr.io/containerregistry-releases/digest-testing:latest
4 changes: 2 additions & 2 deletions tools/__init__.py
Original file line number Diff line number Diff line change
Expand Up @@ -52,7 +52,7 @@
setattr(x, 'fast_pusher', fast_pusher_)


# Expose the digester tool under its post-rename module name.
# (The diff view showed both the old `image_digest_` and new
# `image_digester_` lines; the old module no longer exists, so only the
# post-rename pair is kept.)
from containerregistry.tools import image_digester_
setattr(x, 'image_digester', image_digester_)


115 changes: 115 additions & 0 deletions tools/image_digester_.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,115 @@
# Copyright 2018 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""This package calculates the digest of an image.
The format this tool *expects* to deal with is proprietary.
Image digests aren't stable upon gzip implementation/configuration.
This tool is expected to be only self-consistent.
"""

from __future__ import absolute_import
from __future__ import print_function

import argparse
import logging
import sys

from containerregistry.client.v2_2 import docker_image as v2_2_image
from containerregistry.client.v2_2 import oci_compat
from containerregistry.tools import logging_setup

from six.moves import zip # pylint: disable=redefined-builtin

# Command-line interface for the digester tool.
parser = argparse.ArgumentParser(
    description='Calculate digest for a container image.')

# 'store' is argparse's default action, so it is left implicit below.
parser.add_argument(
    '--tarball', help='An optional legacy base image tarball.')

parser.add_argument(
    '--output-digest',
    required=True,
    help='Filename to store digest in.')

parser.add_argument(
    '--config',
    help='The path to the file storing the image config.')

parser.add_argument(
    '--digest',
    action='append',
    help='The list of layer digest filenames in order.')

parser.add_argument(
    '--layer',
    action='append',
    help='The list of layer filenames in order.')

parser.add_argument(
    '--oci', action='store_true', help='Image has an OCI Manifest.')

def main():
  """Compute the digest of a container image and write it to a file.

  Reads the image either from --config plus --digest/--layer pairs, or
  from a legacy --tarball; exits non-zero on invalid flag combinations
  or any failure while computing the digest.
  """
  logging_setup.DefineCommandLineArgs(parser)
  args = parser.parse_args()
  logging_setup.Init(args=args)

  if not args.config and (args.layer or args.digest):
    logging.fatal(
        'Using --layer or --digest requires --config to be specified.')
    sys.exit(1)

  if not args.config and not args.tarball:
    logging.fatal('Either --config or --tarball must be specified.')
    sys.exit(1)

  # Prefer an explicit --config; otherwise pull the config out of the
  # tarball. One of the two is guaranteed present by the checks above.
  if args.config:
    logging.info('Reading config from %r', args.config)
    with open(args.config, 'r') as reader:
      config = reader.read()
  else:
    logging.info('Reading config from tarball %r', args.tarball)
    with v2_2_image.FromTarball(args.tarball) as base:
      config = base.config_file()

  digests = args.digest or []
  layers = args.layer or []
  if len(digests) != len(layers):
    logging.fatal('--digest and --layer must have matching lengths.')
    sys.exit(1)

  logging.info('Loading v2.2 image from disk ...')
  with v2_2_image.FromDisk(
      config,
      list(zip(digests, layers)),
      legacy_base=args.tarball) as v2_2_img:

    try:
      if args.oci:
        # Wrap the v2.2 image so the OCI-format digest is produced.
        with oci_compat.OCIFromV22(v2_2_img) as oci_img:
          digest = oci_img.digest()
      else:
        digest = v2_2_img.digest()

      with open(args.output_digest, 'w+') as digest_file:
        digest_file.write(digest)
    # pylint: disable=broad-except
    except Exception as e:
      logging.fatal('Error getting digest: %s', e)
      sys.exit(1)


if __name__ == '__main__':
  main()

0 comments on commit b2caa39

Please sign in to comment.