From 4edd5c0c860cc5a4a809870cdc07a806e9ba0492 Mon Sep 17 00:00:00 2001
From: Colin Walters
Date: Tue, 9 May 2023 13:35:57 -0400
Subject: [PATCH] Doing requested changes

---
 lib/src/chunking.rs              | 457 ++++++++++++++++++++-----------
 lib/src/cli.rs                   |   2 +-
 lib/src/container/encapsulate.rs |  72 ++---
 lib/src/fixture.rs               |   7 +-
 lib/tests/it/main.rs             |   7 +-
 5 files changed, 348 insertions(+), 197 deletions(-)

diff --git a/lib/src/chunking.rs b/lib/src/chunking.rs
index 3ad6e6a2..072f8cbd 100644
--- a/lib/src/chunking.rs
+++ b/lib/src/chunking.rs
@@ -15,6 +15,7 @@ use crate::objgv::*;
 use crate::statistics;
 use anyhow::{anyhow, Result};
 use camino::Utf8PathBuf;
+use containers_image_proxy::oci_spec;
 use gvariant::aligned_bytes::TryAsAligned;
 use gvariant::{Marker, Structure};
 use ostree::{gio, glib};
@@ -27,6 +28,11 @@ pub(crate) const MAX_CHUNKS: u32 = 64;

 type RcStr = Rc<str>;
 pub(crate) type ChunkMapping = BTreeMap<RcStr, (u64, Vec<Utf8PathBuf>)>;
+// TODO type PackageSet = HashSet<RcStr>;
+
+const LOW_PARTITION: &str = "2ls";
+const HIGH_PARTITION: &str = "1hs";
+const CONTENT_ANNOTATION: &str = "Content";

 #[derive(Debug, Default)]
 pub(crate) struct Chunk {
@@ -262,7 +268,7 @@ impl Chunking {
         rev: &str,
         meta: ObjectMetaSized,
         max_layers: &Option<NonZeroU32>,
-        prior_build_metadata: &Option<Vec<Vec<String>>>,
+        prior_build_metadata: Option<oci_spec::image::ImageManifest>,
     ) -> Result<Self> {
         let mut r = Self::new(repo, rev)?;
         r.process_mapping(meta, max_layers, prior_build_metadata)?;
@@ -280,7 +286,7 @@ impl Chunking {
         &mut self,
         meta: ObjectMetaSized,
         max_layers: &Option<NonZeroU32>,
-        prior_build_metadata: &Option<Vec<Vec<String>>>,
+        prior_build_metadata: Option<oci_spec::image::ImageManifest>,
     ) -> Result<()> {
         self.max = max_layers
             .unwrap_or(NonZeroU32::new(MAX_CHUNKS).unwrap())
@@ -318,7 +324,7 @@
             prior_build_metadata,
         );
         let duration = start.elapsed();
-        println!("Time elapsed in packing: {:#?}", duration);
+        tracing::debug!("Time elapsed in packing: {:#?}", duration);

         for bin in packing.into_iter() {
             let name = match bin.len() {
@@ -343,7 +349,7 @@
                 n => Cow::Owned(format!("{n} components")),
             };
             let mut chunk = Chunk::new(&name);
-            chunk.packages = bin.iter().map(|v| String::from(&v.meta.name)).collect();
+            chunk.packages = bin.iter().map(|v| String::from(&*v.meta.name)).collect();
             for szmeta in bin {
                 for &obj in rmap.get(&szmeta.meta.identifier).unwrap() {
                     self.remainder.move_obj(&mut chunk, obj.as_str());
@@ -443,7 +449,7 @@ fn get_partitions_with_threshold(
         //low size (ls)
         else if size <= size_low_limit {
             partitions
-                .entry("2ls".to_string())
+                .entry(LOW_PARTITION.to_string())
                 .and_modify(|bin| bin.push(pkg))
                 .or_insert_with(|| vec![pkg]);
         }
@@ -459,7 +465,7 @@ fn get_partitions_with_threshold(

     //Concatenate extra hs packages + med_sizes to keep it descending sorted
     remaining_pkgs.append(&mut med_size);
-    partitions.insert("1hs".to_string(), high_size);
+    partitions.insert(HIGH_PARTITION.to_string(), high_size);

     //Ascending sorted by frequency, so each partition within ms is freq sorted
     remaining_pkgs.sort_by(|a, b| {
@@ -489,89 +495,34 @@ fn get_partitions_with_threshold(
         let size = pkg.size as f64;
         let freq = pkg.meta.change_frequency as f64;

-        //low frequency, high size
-        if (freq <= med_freq_low_limit) && (size >= med_size_high_limit) {
-            partitions
-                .entry("lf_hs".to_string())
-                .and_modify(|bin| bin.push(pkg))
-                .or_insert_with(|| vec![pkg]);
-        }
-        //medium frequency, high size
-        else if (freq < med_freq_high_limit)
-            && (freq > med_freq_low_limit)
-            && (size >= med_size_high_limit)
-        {
-            partitions
-                .entry("mf_hs".to_string())
-                .and_modify(|bin| bin.push(pkg))
-                .or_insert_with(|| vec![pkg]);
-        }
-        //high frequency, high size
-        else if (freq >= med_freq_high_limit) && (size >= med_size_high_limit) {
-            partitions
-                .entry("hf_hs".to_string())
-                .and_modify(|bin| bin.push(pkg))
-                .or_insert_with(|| vec![pkg]);
-        }
-        //low frequency, medium size
-        else if (freq <= med_freq_low_limit)
-            && (size < med_size_high_limit)
-            && (size > med_size_low_limit)
-        {
-            partitions
-                .entry("lf_ms".to_string())
-                .and_modify(|bin| bin.push(pkg))
-                .or_insert_with(|| vec![pkg]);
-        }
-        //medium frequency, medium size
-        else if (freq < med_freq_high_limit)
-            && (freq > med_freq_low_limit)
-            && (size < med_size_high_limit)
-            && (size > med_size_low_limit)
-        {
-            partitions
-                .entry("mf_ms".to_string())
-                .and_modify(|bin| bin.push(pkg))
-                .or_insert_with(|| vec![pkg]);
-        }
-        //high frequency, medium size
-        else if (freq >= med_freq_high_limit)
-            && (size < med_size_high_limit)
-            && (size > med_size_low_limit)
-        {
-            partitions
-                .entry("hf_ms".to_string())
-                .and_modify(|bin| bin.push(pkg))
-                .or_insert_with(|| vec![pkg]);
-        }
-        //low frequency, low size
-        else if (freq <= med_freq_low_limit) && (size <= med_size_low_limit) {
-            partitions
-                .entry("lf_ls".to_string())
-                .and_modify(|bin| bin.push(pkg))
-                .or_insert_with(|| vec![pkg]);
-        }
-        //medium frequency, low size
-        else if (freq < med_freq_high_limit)
-            && (freq > med_freq_low_limit)
-            && (size <= med_size_low_limit)
-        {
-            partitions
-                .entry("mf_ls".to_string())
-                .and_modify(|bin| bin.push(pkg))
-                .or_insert_with(|| vec![pkg]);
-        }
-        //high frequency, low size
-        else if (freq >= med_freq_high_limit) && (size <= med_size_low_limit) {
-            partitions
-                .entry("hf_ls".to_string())
-                .and_modify(|bin| bin.push(pkg))
-                .or_insert_with(|| vec![pkg]);
-        }
+        let size_name;
+        if size >= med_size_high_limit {
+            size_name = "hs";
+        } else if size <= med_size_low_limit {
+            size_name = "ls";
+        } else {
+            size_name = "ms";
+        }
+
+        //Numbered to maintain order of partitions in a BTreeMap of hf, mf, lf
+        let freq_name;
+        if freq >= med_freq_high_limit {
+            freq_name = "3hf";
+        } else if freq <= med_freq_low_limit {
+            freq_name = "5lf";
+        } else {
+            freq_name = "4mf";
+        }
+
+        let bucket = format!("{freq_name}_{size_name}");
+        partitions
+            .entry(bucket.to_string())
+            .and_modify(|bin| bin.push(pkg))
+            .or_insert_with(|| vec![pkg]);
     }

     for (name, pkgs) in &partitions {
-        println!("{:#?}: {:#?}", name, pkgs.len());
+        tracing::debug!("{:#?}: {:#?}", name, pkgs.len());
     }

     Some(partitions)
@@ -596,62 +547,68 @@ fn get_partitions_with_threshold(
 fn basic_packing<'a>(
     components: &'a [ObjectSourceMetaSized],
     bin_size: NonZeroU32,
-    prior_build_metadata: &'a Option<Vec<Vec<String>>>,
+    prior_build_metadata: Option<oci_spec::image::ImageManifest>,
 ) -> Vec<Vec<&'a ObjectSourceMetaSized>> {
     let mut r = Vec::new();
     let mut components: Vec<_> = components.iter().collect();
     let before_processing_pkgs_len = components.len();

-    if before_processing_pkgs_len == 0 {
-        return Vec::new();
-    }
-    //Flatten out prior_build_metadata[i] to view all the packages in prior build as a single vec
-    //
+    //If the current rpm-ostree commit to be encapsulated is not the one in which packing structure changes, then
-    //  Compare flatten(prior_build_metadata[i]) to components to see if pkgs added, updated,
+    //  Flatten out prior_build_metadata to view all the packages in prior build as a single vec
+    //  Compare the flattened vector to components to see if pkgs added, updated,
     //  removed or kept same
-    //  if pkgs added, then add them to the last bin of prior[i][n]
+    //  if pkgs added, then add them to the last bin of prior
     //  if pkgs removed, then remove them from the prior[i]
-    //  iterate through prior[i] and make bins according to the name in nevra of pkgs and return
-    //  (no need of recomputing packaging structure)
+    //  iterate through prior[i] and make bins according to the name in nevra of pkgs to update
+    //  required packages
     //else if pkg structure to be changed || prior build not specified
     //    Recompute optimal packaging structure (Compute partitions, place packages and optimize build)

-    if let Some(prior_build) = prior_build_metadata
-    /* && structure not be changed*/
-    {
-        println!("Keeping old package structure");
-        let mut curr_build: Vec<Vec<String>> = prior_build.clone();
-        //Packing only manaages RPMs not OStree commit
-        curr_build.remove(0);
-        let mut prev_pkgs: Vec<String> = Vec::new();
-        for bin in &curr_build {
-            for pkg in bin {
-                prev_pkgs.push(pkg.to_string());
-            }
-        }
+    if let Some(prior_build) = prior_build_metadata {
+        tracing::debug!("Keeping old package structure");
+
+        //The 1st layer is skipped as packing doesn't manage the ostree_commit layer
+        let mut curr_build: Vec<Vec<String>> = prior_build
+            .layers()
+            .iter()
+            .skip(1)
+            .map(|layer| {
+                let annotation_layer = layer
+                    .annotations()
+                    .as_ref()
+                    .expect("Layer does not contain annotations");
+                let pkgs: Vec<&str> = annotation_layer[CONTENT_ANNOTATION].split(',').collect();
+                pkgs.iter().map(|pkg| pkg.to_string()).collect()
+            })
+            .collect();
+
+        //Flatten and filter
+        let mut prev_pkgs: Vec<String> = curr_build.concat();
         prev_pkgs.retain(|name| !name.is_empty());
-        let curr_pkgs: Vec<String> = components
+
+        //View the packages as unordered sets for lookups and differencing
+        let prev_pkgs_set: HashSet<String> = HashSet::from_iter(prev_pkgs);
+        let curr_pkgs_set: HashSet<String> = components
             .iter()
             .map(|pkg| pkg.meta.name.to_string())
             .collect();
-        let prev_pkgs_set: HashSet<String> = HashSet::from_iter(prev_pkgs);
-        let curr_pkgs_set: HashSet<String> = HashSet::from_iter(curr_pkgs);
-        let added: HashSet<&String> = curr_pkgs_set.difference(&prev_pkgs_set).collect();
-        let removed: HashSet<&String> = prev_pkgs_set.difference(&curr_pkgs_set).collect();
-        let mut add_pkgs_v: Vec<String> = Vec::new();
-        for pkg in added {
-            add_pkgs_v.push(pkg.to_string());
-        }
-        let mut rem_pkgs_v: Vec<String> = Vec::new();
-        for pkg in removed {
-            rem_pkgs_v.push(pkg.to_string());
+
+        //Handle added packages
+        if let Some(last_bin) = curr_build.last_mut() {
+            let added = curr_pkgs_set.difference(&prev_pkgs_set);
+            last_bin.retain(|name| !name.is_empty());
+            last_bin.extend(added.into_iter().cloned());
+        } else {
+            panic!("No empty last bin for added packages");
         }
-        let curr_build_len = &curr_build.len();
-        curr_build[curr_build_len - 1].retain(|name| !name.is_empty());
-        curr_build[curr_build_len - 1].extend(add_pkgs_v);
+
+        //Handle removed packages
+        let removed: HashSet<&String> = prev_pkgs_set.difference(&curr_pkgs_set).collect();
         for bin in curr_build.iter_mut() {
-            bin.retain(|pkg| !rem_pkgs_v.contains(pkg));
+            bin.retain(|pkg| !removed.contains(pkg));
         }
+
+        //Handle updated packages
         let mut name_to_component: HashMap<String, &ObjectSourceMetaSized> = HashMap::new();
         for component in &components {
             name_to_component
                 .entry(String::from(&*component.meta.name))
                 .or_insert(component);
         }
         let mut modified_build: Vec<Vec<&ObjectSourceMetaSized>> = Vec::new();
         for bin in curr_build {
             let mut mod_bin = Vec::new();
             for pkg in bin {
                 mod_bin.push(name_to_component[&pkg]);
             }
             modified_build.push(mod_bin);
         }
-        let mut after_processing_pkgs_len = 0;
-        modified_build.iter().for_each(|bin| {
-            after_processing_pkgs_len += bin.len();
-        });
+
+        //Verify all packages are included
+        let after_processing_pkgs_len: usize = modified_build.iter().map(|b| b.len()).sum();
         assert_eq!(after_processing_pkgs_len, before_processing_pkgs_len);
         assert!(modified_build.len() <= bin_size.get() as usize);
         return modified_build;
     }
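    //Hypothetical worked example of the derivation above (added for illustration; not part
    //of the original change): a prior build with layers [["ostree_commit"], ["pkgA"],
    //["pkgB", "pkgC"]] and a new component set {pkgA-2, pkgB, pkgD} yields
    //[[pkgA-2], [pkgB, pkgD]]: the removed pkgC is dropped from its bin, the added pkgD
    //lands in the last bin, and pkgA is re-resolved by name so its updated NEVRA is
    //carried forward.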
println!("Creating new packing structure"); + //Handle trivial case of no pkgs < bins + if before_processing_pkgs_len < bin_size.get() as usize { + components.into_iter().for_each(|pkg| r.push(vec![pkg])); + if before_processing_pkgs_len > 0 { + let new_pkgs_bin: Vec<&ObjectSourceMetaSized> = Vec::new(); + r.push(new_pkgs_bin); + } + return r; + } + let mut max_freq_components: Vec<&ObjectSourceMetaSized> = Vec::new(); components.retain(|pkg| { let retain: bool = pkg.meta.change_frequency != u32::MAX; @@ -693,52 +659,41 @@ fn basic_packing<'a>( let limit_ls_bins = 1usize; let limit_new_bins = 1usize; let _limit_new_pkgs = 0usize; - let limit_max_frequency_bins = 1usize; - let _limit_max_frequency_pkgs = max_freq_components.len(); + let limit_max_frequency_pkgs = max_freq_components.len(); + let limit_max_frequency_bins = if limit_max_frequency_pkgs > 0 { + 1usize + } else { + 0usize + }; let limit_hs_bins = (0.6 * (bin_size.get() - (limit_ls_bins + limit_new_bins + limit_max_frequency_bins) as u32) as f32) .floor() as usize; - let limit_ms_bins = (0.4 - * (bin_size.get() - - (limit_ls_bins + limit_new_bins + limit_max_frequency_bins) as u32) - as f32) - .floor() as usize; - + let limit_ms_bins = (bin_size.get() + - (limit_hs_bins + limit_ls_bins + limit_new_bins + limit_max_frequency_bins) + as u32) as usize; let partitions = get_partitions_with_threshold(components, limit_hs_bins as usize, 2f64) .expect("Partitioning components into sets"); - let limit_ls_pkgs = match partitions.get("2ls") { + let limit_ls_pkgs = match partitions.get(LOW_PARTITION) { Some(n) => n.len(), None => 0usize, }; let pkg_per_bin_ms: usize = - match (components_len_after_max_freq - limit_hs_bins - limit_ls_pkgs) + (components_len_after_max_freq - limit_hs_bins - limit_ls_pkgs) .checked_div(limit_ms_bins) - { - Some(n) => { - if n < 1 { - panic!("Error: No of bins <= 3"); - } - n - } - None => { - panic!("Error: No of bins <= 3") - } - }; + .expect("number of bins should be >= 4"); //Bins assignment - for partition in partitions.keys() { - let pkgs = partitions.get(partition).expect("hashset"); - - if partition == "1hs" { + for (partition, pkgs) in partitions.iter() { + if partition == HIGH_PARTITION { for pkg in pkgs { r.push(vec![*pkg]); } - } else if partition == "2ls" { + } else if partition == LOW_PARTITION { let mut bin: Vec<&ObjectSourceMetaSized> = Vec::new(); for pkg in pkgs { bin.push(*pkg); @@ -761,9 +716,17 @@ fn basic_packing<'a>( } } } - println!("Bins before unoptimized build: {}", r.len()); - - //Addressing MS bins limit breach by wrapping MS layers + tracing::debug!("Bins before unoptimized build: {}", r.len()); + + //Despite allocation certain number of pkgs per bin in MS partitions, the + //hard limit of number of MS bins can be exceeded. This is because the pkg_per_bin_ms + //is only upper limit and there is no lower limit. Thus, if a partition in MS has only 1 pkg + //but pkg_per_bin_ms > 1, then the entire bin will have 1 pkg. This prevents partition + //mixing. + // + //Addressing MS bins limit breach by mergin internal MS partitions + //The partitions in MS are merged beginnign from the end so to not mix hf bins with lf bins. The + //bins are kept in this order: hf, mf, lf by design. 
     while r.len() > (bin_size.get() as usize - limit_new_bins - limit_max_frequency_bins) {
         for i in (limit_ls_bins + limit_hs_bins..r.len() - 1)
             .step_by(2)
             .rev()
         {
             if r.len() <= (bin_size.get() as usize - limit_new_bins - limit_max_frequency_bins) {
                 break;
             }
             let prev = &r[i - 1];
             let curr = &r[i];
             let mut merge: Vec<&ObjectSourceMetaSized> = Vec::new();
             merge.extend(prev.iter());
             merge.extend(curr.iter());
             r.remove(i);
             r.remove(i - 1);
             r.insert(i, merge);
         }
     }
-    println!("Bins after optimization: {}", r.len());
+    tracing::debug!("Bins after optimization: {}", r.len());

-    r.push(max_freq_components);
+
+    if !max_freq_components.is_empty() {
+        r.push(max_freq_components);
+    }

     let new_pkgs_bin: Vec<&ObjectSourceMetaSized> = Vec::new();
     r.push(new_pkgs_bin);
@@ -810,7 +776,7 @@ mod test {
     fn test_packing_basics() -> Result<()> {
         // null cases
         for v in [1u32, 7].map(|v| NonZeroU32::new(v).unwrap()) {
-            assert_eq!(basic_packing(&[], v, &None).len(), 0);
+            assert_eq!(basic_packing(&[], v, None).len(), 0);
         }
         Ok(())
     }
@@ -821,7 +787,7 @@ mod test {
             serde_json::from_reader(flate2::read::GzDecoder::new(FCOS_CONTENTMETA))?;
         let total_size = contentmeta.iter().map(|v| v.size).sum::<u64>();

-        let packing = basic_packing(&contentmeta, NonZeroU32::new(MAX_CHUNKS).unwrap(), &None);
+        let packing = basic_packing(&contentmeta, NonZeroU32::new(MAX_CHUNKS).unwrap(), None);
         assert!(!contentmeta.is_empty());
         // We should fit into the assigned chunk size
         assert_eq!(packing.len() as u32, MAX_CHUNKS);
@@ -830,4 +796,175 @@ mod test {
         assert_eq!(total_size, packed_total_size);
         Ok(())
     }
+
+    fn create_manifest(prev_expected_structure: Vec<Vec<&str>>) -> oci_spec::image::ImageManifest {
+        let mut p = prev_expected_structure
+            .iter()
+            .map(|b| {
+                b.iter()
+                    .map(|p| p.split(".").collect::<Vec<&str>>()[0].to_string())
+                    .collect()
+            })
+            .collect();
+        let mut metadata_with_ostree_commit = vec![vec![String::from("ostree_commit")]];
+        metadata_with_ostree_commit.append(&mut p);
+
+        let config = oci_spec::image::DescriptorBuilder::default()
+            .media_type(oci_spec::image::MediaType::ImageConfig)
+            .size(7023)
+            .digest("sha256:imageconfig")
+            .build()
+            .expect("build config descriptor");
+
+        let layers: Vec<oci_spec::image::Descriptor> = metadata_with_ostree_commit
+            .iter()
+            .map(|l| {
+                oci_spec::image::DescriptorBuilder::default()
+                    .media_type(oci_spec::image::MediaType::ImageLayerGzip)
+                    .size(100)
+                    .digest(format!("sha256:{}", l.len()))
+                    .annotations(HashMap::from([(
+                        CONTENT_ANNOTATION.to_string(),
+                        l.join(","),
+                    )]))
+                    .build()
+                    .expect("build layer")
+            })
+            .collect();
+
+        let image_manifest = oci_spec::image::ImageManifestBuilder::default()
+            .schema_version(oci_spec::image::SCHEMA_VERSION)
+            .config(config)
+            .layers(layers)
+            .build()
+            .expect("build image manifest");
+        image_manifest
+    }
+
+    #[test]
+    fn test_advanced_packing() -> Result<()> {
+        //Step 1: Initial build (packing structure computed)
+        let contentmeta_v0: Vec<ObjectSourceMetaSized> = vec![
+            vec![1, u32::MAX, 100000],
+            vec![2, u32::MAX, 99999],
+            vec![3, 30, 99998],
+            vec![4, 100, 99997],
+            vec![10, 51, 1000],
+            vec![8, 50, 500],
+            vec![9, 1, 200],
+            vec![11, 100000, 199],
+            vec![6, 30, 2],
+            vec![7, 30, 1],
+        ]
+        .iter()
+        .map(|data| ObjectSourceMetaSized {
+            meta: ObjectSourceMeta {
+                identifier: RcStr::from(format!("pkg{}.0", data[0])),
+                name: RcStr::from(format!("pkg{}", data[0])),
+                srcid: RcStr::from(format!("srcpkg{}", data[0])),
+                change_time_offset: 0,
+                change_frequency: data[1],
+            },
+            size: data[2] as u64,
+        })
+        .collect();
+
+        let packing = basic_packing(
+            &contentmeta_v0.as_slice(),
+            NonZeroU32::new(6).unwrap(),
+            None,
+        );
+        let structure: Vec<Vec<&str>> = packing
+            .iter()
+            .map(|bin| bin.iter().map(|pkg| &*pkg.meta.identifier).collect())
+            .collect();
+        let v0_expected_structure = vec![
+            vec!["pkg3.0"],
+            vec!["pkg4.0"],
+            vec!["pkg6.0", "pkg7.0", "pkg11.0"],
+            vec!["pkg9.0", "pkg8.0", "pkg10.0"],
+            vec!["pkg1.0", "pkg2.0"],
+            vec![],
+        ];
+        assert_eq!(structure, v0_expected_structure);
+
+        //Step 2: Derive packing structure from last build
+
+        let mut contentmeta_v1: Vec<ObjectSourceMetaSized> = contentmeta_v0;
+        //Upgrade pkg1.0 to 1.1
+        contentmeta_v1[0].meta.identifier = RcStr::from("pkg1.1");
+        //Remove pkg7
+        contentmeta_v1.remove(contentmeta_v1.len() - 1);
+        //Add pkg5
+        contentmeta_v1.push(ObjectSourceMetaSized {
+            meta: ObjectSourceMeta {
+                identifier: RcStr::from("pkg5.0"),
+                name: RcStr::from("pkg5"),
+                srcid: RcStr::from("srcpkg5"),
+                change_time_offset: 0,
+                change_frequency: 42,
+            },
+            size: 100000,
+        });
+
+        let image_manifest_v0 = create_manifest(v0_expected_structure);
+        let packing_derived = basic_packing(
+            &contentmeta_v1.as_slice(),
+            NonZeroU32::new(6).unwrap(),
+            Some(image_manifest_v0),
+        );
+        let structure_derived: Vec<Vec<&str>> = packing_derived
+            .iter()
+            .map(|bin| bin.iter().map(|pkg| &*pkg.meta.identifier).collect())
+            .collect();
+        let v1_expected_structure = vec![
+            vec!["pkg3.0"],
+            vec!["pkg4.0"],
+            vec!["pkg6.0", "pkg11.0"],
+            vec!["pkg9.0", "pkg8.0", "pkg10.0"],
+            vec!["pkg1.1", "pkg2.0"],
+            vec!["pkg5.0"],
+        ];
+
+        assert_eq!(structure_derived, v1_expected_structure);
+
+        //Step 3: Another update on derived where the pkg in the last bin updates
+
+        let mut contentmeta_v2: Vec<ObjectSourceMetaSized> = contentmeta_v1;
+        //Upgrade pkg5.0 to 5.1
+        contentmeta_v2[9].meta.identifier = RcStr::from("pkg5.1");
+        //Add pkg12
+        contentmeta_v2.push(ObjectSourceMetaSized {
+            meta: ObjectSourceMeta {
+                identifier: RcStr::from("pkg12.0"),
+                name: RcStr::from("pkg12"),
+                srcid: RcStr::from("srcpkg12"),
+                change_time_offset: 0,
+                change_frequency: 42,
+            },
+            size: 100000,
+        });
+
+        let image_manifest_v1 = create_manifest(v1_expected_structure);
+        let packing_derived = basic_packing(
+            &contentmeta_v2.as_slice(),
+            NonZeroU32::new(6).unwrap(),
+            Some(image_manifest_v1),
+        );
+        let structure_derived: Vec<Vec<&str>> = packing_derived
+            .iter()
+            .map(|bin| bin.iter().map(|pkg| &*pkg.meta.identifier).collect())
+            .collect();
+        let v2_expected_structure = vec![
+            vec!["pkg3.0"],
+            vec!["pkg4.0"],
+            vec!["pkg6.0", "pkg11.0"],
+            vec!["pkg9.0", "pkg8.0", "pkg10.0"],
+            vec!["pkg1.1", "pkg2.0"],
+            vec!["pkg5.1", "pkg12.0"],
+        ];
+
+        assert_eq!(structure_derived, v2_expected_structure);
+        Ok(())
+    }
 }
diff --git a/lib/src/cli.rs b/lib/src/cli.rs
index ba3c6f19..f5a3ef68 100644
--- a/lib/src/cli.rs
+++ b/lib/src/cli.rs
@@ -584,7 +584,7 @@ async fn container_export(
         ..Default::default()
     };
     let pushed =
-        crate::container::encapsulate(repo, rev, &config, Some(opts), None, imgref).await?;
+        crate::container::encapsulate(repo, rev, &config, None, Some(opts), None, imgref).await?;
     println!("{}", pushed);
     Ok(())
 }
diff --git a/lib/src/container/encapsulate.rs b/lib/src/container/encapsulate.rs
index fd8e95fd..66918656 100644
--- a/lib/src/container/encapsulate.rs
+++ b/lib/src/container/encapsulate.rs
@@ -150,37 +150,29 @@ fn export_chunked(
         .uncompressed_sha256
         .clone();

-    // Add the ostree layer
-    let mut annotation_ostree_layer = HashMap::new();
-    annotation_ostree_layer.insert("Content".to_string(), "ostree_commit".to_string());
-    ociw.push_layer(
-        manifest,
-        imgcfg,
-        ostree_layer,
-        description,
-        Some(annotation_ostree_layer),
-    );
-    // Add the component/content layers
-    for (layer, name, packages) in layers {
-        let mut annotation_component_layer = HashMap::new();
-        annotation_component_layer.insert("Content".to_string(), packages.join(","));
-        ociw.push_layer(
-            manifest,
-            imgcfg,
-            layer,
-            name.as_str(),
-            Some(annotation_component_layer),
-        );
-    }
-    // This label (mentioned above) points to the last layer that is part of
-    // the ostree commit.
-    labels.insert(
-        opts.format.label().into(),
-        format!("sha256:{}", last_digest),
-    );
-    Ok(())
-}
+    // Add the ostree layer
+    let mut annotation_ostree_layer = HashMap::new();
+    annotation_ostree_layer.insert("Content".to_string(), "ostree_commit".to_string());
+    ociw.push_layer(
+        manifest,
+        imgcfg,
+        ostree_layer,
+        description,
+        Some(annotation_ostree_layer),
+    );
+    // Add the component/content layers
+    for (layer, name, packages) in layers {
+        let mut annotation_component_layer = HashMap::new();
+        annotation_component_layer.insert("Content".to_string(), packages.join(","));
+        ociw.push_layer(
+            manifest,
+            imgcfg,
+            layer,
+            name.as_str(),
+            Some(annotation_component_layer),
+        );
     }
+    // This label (mentioned above) points to the last layer that is part of
     // the ostree commit.
     labels.insert(
         opts.format.label().into(),
         format!("sha256:{}", last_digest),
     );
@@ -199,6 +191,7 @@ fn build_oci(
     tag: Option<&str>,
     config: &Config,
     opts: ExportOpts,
+    prior_build: Option<oci_spec::image::ImageManifest>,
     contentmeta: Option<ObjectMetaSized>,
 ) -> Result<ImageReference> {
     if !ocidir_path.exists() {
@@ -240,7 +233,7 @@ fn build_oci(
                 commit,
                 meta,
                 &opts.max_layers,
-                &opts.prior_build_metadata,
+                prior_build,
             )
         })
         .transpose()?;
@@ -324,6 +317,7 @@ async fn build_impl(
     repo: &ostree::Repo,
     ostree_ref: &str,
     config: &Config,
+    prior_build: Option<oci_spec::image::ImageManifest>,
     opts: Option<ExportOpts>,
     contentmeta: Option<ObjectMetaSized>,
     dest: &ImageReference,
@@ -341,6 +335,7 @@ async fn build_impl(
             tag,
             config,
             opts,
+            prior_build,
             contentmeta,
         )?;
         None
@@ -356,6 +351,7 @@ async fn build_impl(
             None,
             config,
             opts,
+            prior_build,
             contentmeta,
         )?;
@@ -390,8 +386,6 @@ pub struct ExportOpts {
     // TODO semver-break: remove this
     /// Use only the standard OCI version label
     pub no_legacy_version_label: bool,
-    /// Prevent major change in packaging structure by taking previous builds in order of priority
-    pub prior_build_metadata: Option<Vec<Vec<String>>>,
 }

 impl ExportOpts {
@@ -412,9 +406,19 @@
 pub async fn encapsulate<S: AsRef<str>>(
     repo: &ostree::Repo,
     ostree_ref: S,
     config: &Config,
+    prior_build: Option<oci_spec::image::ImageManifest>,
     opts: Option<ExportOpts>,
     contentmeta: Option<ObjectMetaSized>,
     dest: &ImageReference,
 ) -> Result<String> {
-    build_impl(repo, ostree_ref.as_ref(), config, opts, contentmeta, dest).await
+    build_impl(
+        repo,
+        ostree_ref.as_ref(),
+        config,
+        prior_build,
+        opts,
+        contentmeta,
+        dest,
+    )
+    .await
 }
diff --git a/lib/src/fixture.rs b/lib/src/fixture.rs
index ede3aa6f..fbf649e1 100644
--- a/lib/src/fixture.rs
+++ b/lib/src/fixture.rs
@@ -170,6 +170,7 @@ pub const CONTENTS_CHECKSUM_V0: &str =
     "5e41de82f9f861fa51e53ce6dd640a260e4fb29b7657f5a3f14157e93d2c0659";
 // 1 for ostree commit, 2 for max frequency packages, 3 as empty layer
 pub const LAYERS_V0_LEN: usize = 3usize;
+pub const PKGS_V0_LEN: usize = 7usize;

 #[derive(Debug, PartialEq, Eq)]
 enum SeLabel {
@@ -663,11 +664,15 @@ impl Fixture {
         let contentmeta = self.get_object_meta().context("Computing object meta")?;
         let contentmeta = ObjectMetaSized::compute_sizes(self.srcrepo(), contentmeta)
             .context("Computing sizes")?;
-        let opts = ExportOpts::default();
+        let opts = ExportOpts {
+            max_layers: std::num::NonZeroU32::new(PKGS_V0_LEN as u32),
+            ..Default::default()
+        };
         let digest = crate::container::encapsulate(
             self.srcrepo(),
             self.testref(),
             &config,
+            None,
             Some(opts),
             Some(contentmeta),
             &imgref,
diff --git a/lib/tests/it/main.rs b/lib/tests/it/main.rs
index a73a4afe..a9e5b69c 100644
--- a/lib/tests/it/main.rs
+++ b/lib/tests/it/main.rs
@@ -21,7 +21,7 @@ use std::process::Command;
 use std::time::SystemTime;
 use xshell::cmd;

-use ostree_ext::fixture::{FileDef, Fixture, CONTENTS_CHECKSUM_V0, LAYERS_V0_LEN};
+use ostree_ext::fixture::{FileDef, Fixture, CONTENTS_CHECKSUM_V0, LAYERS_V0_LEN, PKGS_V0_LEN};

 const EXAMPLE_TAR_LAYER: &[u8] = include_bytes!("fixtures/hlinks.tar.gz");
 const TEST_REGISTRY_DEFAULT: &str = "localhost:5000";
@@ -480,12 +480,14 @@ async fn impl_test_container_import_export(chunked: bool) -> Result<()> {
     let opts = ExportOpts {
         copy_meta_keys: vec!["buildsys.checksum".to_string()],
         copy_meta_opt_keys: vec!["nosuchvalue".to_string()],
+        max_layers: std::num::NonZeroU32::new(PKGS_V0_LEN as u32),
         ..Default::default()
     };
     let digest = ostree_ext::container::encapsulate(
         fixture.srcrepo(),
         fixture.testref(),
         &config,
+        None,
         Some(opts),
         contentmeta,
         &srcoci_imgref,
@@ -537,6 +539,7 @@ async fn impl_test_container_import_export(chunked: bool) -> Result<()> {
         &config,
         None,
         None,
+        None,
         &ociarchive_dest,
     )
     .await
@@ -910,6 +913,7 @@ async fn test_container_write_derive() -> Result<()> {
         },
         None,
         None,
+        None,
         &ImageReference {
             transport: Transport::OciDir,
             name: base_oci_path.to_string(),
@@ -1298,6 +1302,7 @@ async fn test_container_import_export_registry() -> Result<()> {
         &config,
         None,
         None,
+        None,
         &src_imgref,
     )
     .await
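
For callers of this library, the net effect of the patch is that the prior image manifest is
now threaded through `encapsulate` explicitly instead of riding along in `ExportOpts`. A
minimal sketch of the new call shape (illustration only, not from the patch itself: `repo`,
`rev`, `config`, `opts`, `contentmeta`, `dest`, and `previous_manifest` are placeholder
bindings, and how the caller fetches the previous build's manifest is out of scope here):

    // Pass the previous build's OCI manifest (if any) so that chunking can
    // derive the layer partitioning from it instead of recomputing it.
    let prior_build: Option<oci_spec::image::ImageManifest> = previous_manifest;
    let digest = ostree_ext::container::encapsulate(
        repo,              // &ostree::Repo
        rev,               // ostree ref to encapsulate
        &config,
        prior_build,       // parameter added by this patch
        Some(opts),        // ExportOpts, e.g. with max_layers set
        Some(contentmeta), // ObjectMetaSized for per-package chunking
        &dest,             // ImageReference
    )
    .await?;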