From 09067db8a0098861c0ba03ead3cbb49579e9a055 Mon Sep 17 00:00:00 2001 From: Tyson Nottingham Date: Tue, 12 Jan 2021 13:04:51 -0800 Subject: [PATCH] Serialize dependency graph directly from DepGraph Reduce memory usage by serializing dep graph directly from `DepGraph`, rather than copying it into `SerializedDepGraph` and serializing that. --- .../rustc_incremental/src/persist/save.rs | 82 +---- .../rustc_query_system/src/dep_graph/graph.rs | 335 +++++++++++++----- .../rustc_query_system/src/dep_graph/prev.rs | 2 +- .../src/dep_graph/serialized.rs | 79 ++++- 4 files changed, 328 insertions(+), 170 deletions(-) diff --git a/compiler/rustc_incremental/src/persist/save.rs b/compiler/rustc_incremental/src/persist/save.rs index f63cdfc5694..45d474b89b8 100644 --- a/compiler/rustc_incremental/src/persist/save.rs +++ b/compiler/rustc_incremental/src/persist/save.rs @@ -1,6 +1,6 @@ use rustc_data_structures::fx::FxHashMap; use rustc_data_structures::sync::join; -use rustc_middle::dep_graph::{DepGraph, DepKind, WorkProduct, WorkProductId}; +use rustc_middle::dep_graph::{DepGraph, WorkProduct, WorkProductId}; use rustc_middle::ty::TyCtxt; use rustc_serialize::opaque::{FileEncodeResult, FileEncoder}; use rustc_serialize::Encodable as RustcEncodable; @@ -148,83 +148,15 @@ fn encode_dep_graph(tcx: TyCtxt<'_>, encoder: &mut FileEncoder) -> FileEncodeRes // First encode the commandline arguments hash tcx.sess.opts.dep_tracking_hash().encode(encoder)?; - // Encode the graph data. - let serialized_graph = - tcx.sess.time("incr_comp_serialize_dep_graph", || tcx.dep_graph.serialize()); - if tcx.sess.opts.debugging_opts.incremental_info { - #[derive(Clone)] - struct Stat { - kind: DepKind, - node_counter: u64, - edge_counter: u64, - } - - let total_node_count = serialized_graph.nodes.len(); - let total_edge_count = serialized_graph.edge_list_data.len(); - - let mut counts: FxHashMap<_, Stat> = - FxHashMap::with_capacity_and_hasher(total_node_count, Default::default()); - - for (i, &node) in serialized_graph.nodes.iter_enumerated() { - let stat = counts.entry(node.kind).or_insert(Stat { - kind: node.kind, - node_counter: 0, - edge_counter: 0, - }); - - stat.node_counter += 1; - let (edge_start, edge_end) = serialized_graph.edge_list_indices[i]; - stat.edge_counter += (edge_end - edge_start) as u64; - } - - let mut counts: Vec<_> = counts.values().cloned().collect(); - counts.sort_by_key(|s| -(s.node_counter as i64)); - - println!("[incremental]"); - println!("[incremental] DepGraph Statistics"); - - const SEPARATOR: &str = "[incremental] --------------------------------\ - ----------------------------------------------\ - ------------"; - - println!("{}", SEPARATOR); - println!("[incremental]"); - println!("[incremental] Total Node Count: {}", total_node_count); - println!("[incremental] Total Edge Count: {}", total_edge_count); - if let Some((total_edge_reads, total_duplicate_edge_reads)) = - tcx.dep_graph.edge_deduplication_data() - { - println!("[incremental] Total Edge Reads: {}", total_edge_reads); - println!("[incremental] Total Duplicate Edge Reads: {}", total_duplicate_edge_reads); - } - println!("[incremental]"); - println!( - "[incremental] {:<36}| {:<17}| {:<12}| {:<17}|", - "Node Kind", "Node Frequency", "Node Count", "Avg. 
Edge Count"
-        );
-        println!(
-            "[incremental] -------------------------------------\
-                          |------------------\
-                          |-------------\
-                          |------------------|"
-        );
-
-        for stat in counts.iter() {
-            println!(
-                "[incremental] {:<36}|{:>16.1}% |{:>12} |{:>17.1} |",
-                format!("{:?}", stat.kind),
-                (100.0 * (stat.node_counter as f64)) / (total_node_count as f64), // percentage of all nodes
-                stat.node_counter,
-                (stat.edge_counter as f64) / (stat.node_counter as f64), // average edges per kind
-            );
-        }
-
-        println!("{}", SEPARATOR);
-        println!("[incremental]");
+        tcx.dep_graph.print_incremental_info();
     }
 
-    tcx.sess.time("incr_comp_encode_serialized_dep_graph", || serialized_graph.encode(encoder))
+    // There is a tiny window between printing the incremental info above and encoding the dep
+    // graph below in which the dep graph could change, thus making the printed incremental info
+    // slightly out of date. If this matters to you, please feel free to submit a patch. :)
+
+    tcx.sess.time("incr_comp_encode_serialized_dep_graph", || tcx.dep_graph.encode(encoder))
 }
 
 fn encode_work_product_index(
diff --git a/compiler/rustc_query_system/src/dep_graph/graph.rs b/compiler/rustc_query_system/src/dep_graph/graph.rs
index 605d7ae4af6..4a54ddf8545 100644
--- a/compiler/rustc_query_system/src/dep_graph/graph.rs
+++ b/compiler/rustc_query_system/src/dep_graph/graph.rs
@@ -7,6 +7,7 @@
 use rustc_data_structures::unlikely;
 use rustc_errors::Diagnostic;
 use rustc_index::vec::{Idx, IndexVec};
+use rustc_serialize::{Encodable, Encoder};
 
 use parking_lot::{Condvar, Mutex};
 use smallvec::{smallvec, SmallVec};
@@ -21,7 +22,7 @@
 use super::debug::EdgeFilter;
 use super::prev::PreviousDepGraph;
 use super::query::DepGraphQuery;
-use super::serialized::{SerializedDepGraph, SerializedDepNodeIndex};
+use super::serialized::SerializedDepNodeIndex;
 use super::{DepContext, DepKind, DepNode, WorkProductId};
 
 #[derive(Clone)]
@@ -148,7 +149,7 @@ pub fn query(&self) -> DepGraphQuery<K> {
         let mut edge_list_indices = Vec::with_capacity(node_count);
         let mut edge_list_data = Vec::with_capacity(edge_count);
 
-        // See `serialize` for notes on the approach used here.
+        // See `DepGraph`'s `Encodable` implementation for notes on the approach used here.
 
         edge_list_data.extend(data.unshared_edges.iter().map(|i| i.index()));
 
@@ -551,19 +552,6 @@ pub fn dep_node_debug_str(&self, dep_node: DepNode<K>) -> Option<String> {
         self.data.as_ref()?.dep_node_debug.borrow().get(&dep_node).cloned()
     }
 
-    pub fn edge_deduplication_data(&self) -> Option<(u64, u64)> {
-        if cfg!(debug_assertions) {
-            let current_dep_graph = &self.data.as_ref().unwrap().current;
-
-            Some((
-                current_dep_graph.total_read_count.load(Relaxed),
-                current_dep_graph.total_duplicate_read_count.load(Relaxed),
-            ))
-        } else {
-            None
-        }
-    }
-
     fn edge_count(&self, node_data: &LockGuard<'_, DepNodeData<K>>) -> usize {
         let data = self.data.as_ref().unwrap();
         let previous = &data.previous;
@@ -579,84 +567,6 @@ fn edge_count(&self, node_data: &LockGuard<'_, DepNodeData<K>>) -> usize {
         edge_count
     }
 
-    pub fn serialize(&self) -> SerializedDepGraph<K> {
-        type SDNI = SerializedDepNodeIndex;
-
-        let data = self.data.as_ref().unwrap();
-        let previous = &data.previous;
-
-        // Note locking order: `prev_index_to_index`, then `data`.
-        let prev_index_to_index = data.current.prev_index_to_index.lock();
-        let data = data.current.data.lock();
-        let node_count = data.hybrid_indices.len();
-        let edge_count = self.edge_count(&data);
-
-        let mut nodes = IndexVec::with_capacity(node_count);
-        let mut fingerprints = IndexVec::with_capacity(node_count);
-        let mut edge_list_indices = IndexVec::with_capacity(node_count);
-        let mut edge_list_data = Vec::with_capacity(edge_count);
-
-        // `rustc_middle::ty::query::OnDiskCache` expects nodes to be in
-        // `DepNodeIndex` order. The edges in `edge_list_data`, on the other
-        // hand, don't need to be in a particular order, as long as each node
-        // can reference its edges as a contiguous range within it. This is why
-        // we're able to copy `unshared_edges` directly into `edge_list_data`.
-        // It meets the above requirements, and each non-dark-green node already
-        // knows the range of edges to reference within it, which they'll push
-        // onto `edge_list_indices`. Dark green nodes, however, don't have their
-        // edges in `unshared_edges`, so need to add them to `edge_list_data`.
-
-        edge_list_data.extend(data.unshared_edges.iter().map(|i| SDNI::new(i.index())));
-
-        for &hybrid_index in data.hybrid_indices.iter() {
-            match hybrid_index.into() {
-                HybridIndex::New(i) => {
-                    let new = &data.new;
-                    nodes.push(new.nodes[i]);
-                    fingerprints.push(new.fingerprints[i]);
-                    let edges = &new.edges[i];
-                    edge_list_indices.push((edges.start.as_u32(), edges.end.as_u32()));
-                }
-                HybridIndex::Red(i) => {
-                    let red = &data.red;
-                    nodes.push(previous.index_to_node(red.node_indices[i]));
-                    fingerprints.push(red.fingerprints[i]);
-                    let edges = &red.edges[i];
-                    edge_list_indices.push((edges.start.as_u32(), edges.end.as_u32()));
-                }
-                HybridIndex::LightGreen(i) => {
-                    let lg = &data.light_green;
-                    nodes.push(previous.index_to_node(lg.node_indices[i]));
-                    fingerprints.push(previous.fingerprint_by_index(lg.node_indices[i]));
-                    let edges = &lg.edges[i];
-                    edge_list_indices.push((edges.start.as_u32(), edges.end.as_u32()));
-                }
-                HybridIndex::DarkGreen(prev_index) => {
-                    nodes.push(previous.index_to_node(prev_index));
-                    fingerprints.push(previous.fingerprint_by_index(prev_index));
-
-                    let edges_iter = previous
-                        .edge_targets_from(prev_index)
-                        .iter()
-                        .map(|&dst| prev_index_to_index[dst].as_ref().unwrap());
-
-                    let start = edge_list_data.len() as u32;
-                    edge_list_data.extend(edges_iter.map(|i| SDNI::new(i.index())));
-                    let end = edge_list_data.len() as u32;
-                    edge_list_indices.push((start, end));
-                }
-            }
-        }
-
-        debug_assert_eq!(nodes.len(), node_count);
-        debug_assert_eq!(fingerprints.len(), node_count);
-        debug_assert_eq!(edge_list_indices.len(), node_count);
-        debug_assert_eq!(edge_list_data.len(), edge_count);
-        debug_assert!(edge_list_data.len() <= u32::MAX as usize);
-
-        SerializedDepGraph { nodes, fingerprints, edge_list_indices, edge_list_data }
-    }
-
     pub fn node_color(&self, dep_node: &DepNode<K>) -> Option<DepNodeColor> {
         if let Some(ref data) = self.data {
             if let Some(prev_index) = data.previous.node_to_index_opt(dep_node) {
@@ -997,12 +907,251 @@ pub fn register_reused_dep_nodes<Ctxt: DepContext<DepKind = K>>(&self, tcx: Ctxt
         }
     }
 
+    pub fn print_incremental_info(&self) {
+        #[derive(Clone)]
+        struct Stat<Kind: DepKind> {
+            kind: Kind,
+            node_counter: u64,
+            edge_counter: u64,
+        }
+
+        let data = self.data.as_ref().unwrap();
+        let prev = &data.previous;
+        let current = &data.current;
+        let data = current.data.lock();
+
+        let mut stats: FxHashMap<_, Stat<K>> = FxHashMap::with_hasher(Default::default());
+
+        for &hybrid_index in data.hybrid_indices.iter() {
+            let (kind, edge_count) = match hybrid_index.into() {
+                HybridIndex::New(new_index) => {
+                    let kind = data.new.nodes[new_index].kind;
+                    let edge_range = &data.new.edges[new_index];
+                    (kind, edge_range.end.as_usize() - edge_range.start.as_usize())
+                }
+                HybridIndex::Red(red_index) => {
+                    let kind = prev.index_to_node(data.red.node_indices[red_index]).kind;
+                    let edge_range = &data.red.edges[red_index];
+                    (kind, edge_range.end.as_usize() - edge_range.start.as_usize())
+                }
+                HybridIndex::LightGreen(lg_index) => {
+                    let kind = prev.index_to_node(data.light_green.node_indices[lg_index]).kind;
+                    let edge_range = &data.light_green.edges[lg_index];
+                    (kind, edge_range.end.as_usize() - edge_range.start.as_usize())
+                }
+                HybridIndex::DarkGreen(prev_index) => {
+                    let kind = prev.index_to_node(prev_index).kind;
+                    let edge_count = prev.edge_targets_from(prev_index).len();
+                    (kind, edge_count)
+                }
+            };
+
+            let stat = stats.entry(kind).or_insert(Stat { kind, node_counter: 0, edge_counter: 0 });
+            stat.node_counter += 1;
+            stat.edge_counter += edge_count as u64;
+        }
+
+        let total_node_count = data.hybrid_indices.len();
+        let total_edge_count = self.edge_count(&data);
+
+        // Drop the lock guard.
+        std::mem::drop(data);
+
+        let mut stats: Vec<_> = stats.values().cloned().collect();
+        stats.sort_by_key(|s| -(s.node_counter as i64));
+
+        const SEPARATOR: &str = "[incremental] --------------------------------\
+            ----------------------------------------------\
+            ------------";
+
+        println!("[incremental]");
+        println!("[incremental] DepGraph Statistics");
+        println!("{}", SEPARATOR);
+        println!("[incremental]");
+        println!("[incremental] Total Node Count: {}", total_node_count);
+        println!("[incremental] Total Edge Count: {}", total_edge_count);
+
+        if cfg!(debug_assertions) {
+            let total_edge_reads = current.total_read_count.load(Relaxed);
+            let total_duplicate_edge_reads = current.total_duplicate_read_count.load(Relaxed);
+
+            println!("[incremental] Total Edge Reads: {}", total_edge_reads);
+            println!("[incremental] Total Duplicate Edge Reads: {}", total_duplicate_edge_reads);
+        }
+
+        println!("[incremental]");
+
+        println!(
+            "[incremental] {:<36}| {:<17}| {:<12}| {:<17}|",
+            "Node Kind", "Node Frequency", "Node Count", "Avg. Edge Count"
+        );
+
+        println!(
+            "[incremental] -------------------------------------\
+            |------------------\
+            |-------------\
+            |------------------|"
+        );
+
+        for stat in stats {
+            let node_kind_ratio = (100.0 * (stat.node_counter as f64)) / (total_node_count as f64);
+            let node_kind_avg_edges = (stat.edge_counter as f64) / (stat.node_counter as f64);
+
+            println!(
+                "[incremental] {:<36}|{:>16.1}% |{:>12} |{:>17.1} |",
+                format!("{:?}", stat.kind),
+                node_kind_ratio,
+                stat.node_counter,
+                node_kind_avg_edges,
+            );
+        }
+
+        println!("{}", SEPARATOR);
+        println!("[incremental]");
+    }
+
     fn next_virtual_depnode_index(&self) -> DepNodeIndex {
         let index = self.virtual_dep_node_index.fetch_add(1, Relaxed);
         DepNodeIndex::from_u32(index)
     }
 }
 
+impl<E: Encoder, K: DepKind + Encodable<E>> Encodable<E> for DepGraph<K> {
+    fn encode(&self, e: &mut E) -> Result<(), E::Error> {
+        // We used to serialize the dep graph by creating and serializing a `SerializedDepGraph`
+        // using data copied from the `DepGraph`. But copying created a large memory spike, so we
+        // now serialize directly from the `DepGraph` as if it's a `SerializedDepGraph`. Because we
+        // deserialize that data into a `SerializedDepGraph` in the next compilation session, we
+        // need `DepGraph`'s `Encodable` and `SerializedDepGraph`'s `Decodable` implementations to
+        // be in sync. If you update this encoding, be sure to update the decoding, and vice-versa.
+
+        let data = self.data.as_ref().unwrap();
+        let prev = &data.previous;
+
+        // Note locking order: `prev_index_to_index`, then `data`.
+        let prev_index_to_index = data.current.prev_index_to_index.lock();
+        let data = data.current.data.lock();
+        let new = &data.new;
+        let red = &data.red;
+        let lg = &data.light_green;
+
+        let node_count = data.hybrid_indices.len();
+        let edge_count = self.edge_count(&data);
+
+        // `rustc_middle::ty::query::OnDiskCache` expects nodes to be encoded in `DepNodeIndex`
+        // order. The edges in `edge_list_data` don't need to be in a particular order, as long as
+        // each node references its edges as a contiguous range within it. Therefore, we can encode
+        // `edge_list_data` directly from `unshared_edges`. It meets the above requirements, as
+        // each non-dark-green node already knows the range of edges to reference within it, which
+        // they'll encode in `edge_list_indices`. Dark green nodes, however, don't have their edges
+        // in `unshared_edges`, so need to add them to `edge_list_data`.
+
+        use HybridIndex::*;
+
+        // Encoded values (nodes, etc.) are explicitly typed below to avoid inadvertently
+        // serializing data in the wrong format (i.e. one incompatible with `SerializedDepGraph`).
+        e.emit_struct("SerializedDepGraph", 4, |e| {
+            e.emit_struct_field("nodes", 0, |e| {
+                // `SerializedDepGraph` expects this to be encoded as a sequence of `DepNode`s.
+                e.emit_seq(node_count, |e| {
+                    for (seq_index, &hybrid_index) in data.hybrid_indices.iter().enumerate() {
+                        let node: DepNode<K> = match hybrid_index.into() {
+                            New(i) => new.nodes[i],
+                            Red(i) => prev.index_to_node(red.node_indices[i]),
+                            LightGreen(i) => prev.index_to_node(lg.node_indices[i]),
+                            DarkGreen(prev_index) => prev.index_to_node(prev_index),
+                        };
+
+                        e.emit_seq_elt(seq_index, |e| node.encode(e))?;
+                    }
+
+                    Ok(())
+                })
+            })?;
+
+            e.emit_struct_field("fingerprints", 1, |e| {
+                // `SerializedDepGraph` expects this to be encoded as a sequence of `Fingerprint`s.
+                e.emit_seq(node_count, |e| {
+                    for (seq_index, &hybrid_index) in data.hybrid_indices.iter().enumerate() {
+                        let fingerprint: Fingerprint = match hybrid_index.into() {
+                            New(i) => new.fingerprints[i],
+                            Red(i) => red.fingerprints[i],
+                            LightGreen(i) => prev.fingerprint_by_index(lg.node_indices[i]),
+                            DarkGreen(prev_index) => prev.fingerprint_by_index(prev_index),
+                        };
+
+                        e.emit_seq_elt(seq_index, |e| fingerprint.encode(e))?;
+                    }
+
+                    Ok(())
+                })
+            })?;
+
+            e.emit_struct_field("edge_list_indices", 2, |e| {
+                // `SerializedDepGraph` expects this to be encoded as a sequence of `(u32, u32)`s.
+                e.emit_seq(node_count, |e| {
+                    // Dark green node edges start after the unshared (all other nodes') edges.
+                    let mut dark_green_edge_index = data.unshared_edges.len();
+
+                    for (seq_index, &hybrid_index) in data.hybrid_indices.iter().enumerate() {
+                        let edge_indices: (u32, u32) = match hybrid_index.into() {
+                            New(i) => (new.edges[i].start.as_u32(), new.edges[i].end.as_u32()),
+                            Red(i) => (red.edges[i].start.as_u32(), red.edges[i].end.as_u32()),
+                            LightGreen(i) => (lg.edges[i].start.as_u32(), lg.edges[i].end.as_u32()),
+                            DarkGreen(prev_index) => {
+                                let edge_count = prev.edge_targets_from(prev_index).len();
+                                let start = dark_green_edge_index as u32;
+                                dark_green_edge_index += edge_count;
+                                let end = dark_green_edge_index as u32;
+                                (start, end)
+                            }
+                        };
+
+                        e.emit_seq_elt(seq_index, |e| edge_indices.encode(e))?;
+                    }
+
+                    assert_eq!(dark_green_edge_index, edge_count);
+
+                    Ok(())
+                })
+            })?;
+
+            e.emit_struct_field("edge_list_data", 3, |e| {
+                // `SerializedDepGraph` expects this to be encoded as a sequence of
+                // `SerializedDepNodeIndex`.
+                e.emit_seq(edge_count, |e| {
+                    for (seq_index, &edge) in data.unshared_edges.iter().enumerate() {
+                        let serialized_edge = SerializedDepNodeIndex::new(edge.index());
+                        e.emit_seq_elt(seq_index, |e| serialized_edge.encode(e))?;
+                    }
+
+                    let mut seq_index = data.unshared_edges.len();
+
+                    for &hybrid_index in data.hybrid_indices.iter() {
+                        if let DarkGreen(prev_index) = hybrid_index.into() {
+                            for &edge in prev.edge_targets_from(prev_index) {
+                                // Dark green node edges are stored in the previous graph
+                                // and must be converted to edges in the current graph,
+                                // and then serialized as `SerializedDepNodeIndex`.
+                                let serialized_edge = SerializedDepNodeIndex::new(
+                                    prev_index_to_index[edge].as_ref().unwrap().index(),
+                                );
+
+                                e.emit_seq_elt(seq_index, |e| serialized_edge.encode(e))?;
+                                seq_index += 1;
+                            }
+                        }
+                    }
+
+                    assert_eq!(seq_index, edge_count);
+
+                    Ok(())
+                })
+            })
+        })
+    }
+}
+
 /// A "work product" is an intermediate result that we save into the
 /// incremental directory for later re-use. The primary example are
 /// the object files that we save for each partition at code
diff --git a/compiler/rustc_query_system/src/dep_graph/prev.rs b/compiler/rustc_query_system/src/dep_graph/prev.rs
index 29357ce9449..c3d0f795255 100644
--- a/compiler/rustc_query_system/src/dep_graph/prev.rs
+++ b/compiler/rustc_query_system/src/dep_graph/prev.rs
@@ -3,7 +3,7 @@
 use rustc_data_structures::fingerprint::Fingerprint;
 use rustc_data_structures::fx::FxHashMap;
 
-#[derive(Debug, Encodable, Decodable)]
+#[derive(Debug)]
 pub struct PreviousDepGraph<K: DepKind> {
     data: SerializedDepGraph<K>,
     index: FxHashMap<DepNode<K>, SerializedDepNodeIndex>,
diff --git a/compiler/rustc_query_system/src/dep_graph/serialized.rs b/compiler/rustc_query_system/src/dep_graph/serialized.rs
index 28e07406918..9bb922b0a90 100644
--- a/compiler/rustc_query_system/src/dep_graph/serialized.rs
+++ b/compiler/rustc_query_system/src/dep_graph/serialized.rs
@@ -3,6 +3,7 @@
 use super::{DepKind, DepNode};
 use rustc_data_structures::fingerprint::Fingerprint;
 use rustc_index::vec::IndexVec;
+use rustc_serialize::{Decodable, Decoder};
 
 // The maximum value of `SerializedDepNodeIndex` leaves the upper two bits
 // unused so that we can store multiple index types in `CompressedHybridIndex`,
@@ -14,7 +15,7 @@ pub struct SerializedDepNodeIndex {
 }
 
 /// Data for use when recompiling the **current crate**.
-#[derive(Debug, Encodable, Decodable)]
+#[derive(Debug)]
 pub struct SerializedDepGraph<K: DepKind> {
     /// The set of all DepNodes in the graph
     pub nodes: IndexVec<SerializedDepNodeIndex, DepNode<K>>,
@@ -48,3 +49,79 @@ pub fn edge_targets_from(&self, source: SerializedDepNodeIndex) -> &[SerializedD
         &self.edge_list_data[targets.0 as usize..targets.1 as usize]
     }
 }
+
+impl<D: Decoder, K: DepKind + Decodable<D>> Decodable<D> for SerializedDepGraph<K> {
+    fn decode(d: &mut D) -> Result<SerializedDepGraph<K>, D::Error> {
+        // We used to serialize the dep graph by creating and serializing a `SerializedDepGraph`
+        // using data copied from the `DepGraph`. But copying created a large memory spike, so we
+        // now serialize directly from the `DepGraph` as if it's a `SerializedDepGraph`. Because we
+        // deserialize that data into a `SerializedDepGraph` in the next compilation session, we
+        // need `DepGraph`'s `Encodable` and `SerializedDepGraph`'s `Decodable` implementations to
+        // be in sync. If you update this decoding, be sure to update the encoding, and vice-versa.
+        //
+        // We mimic the sequence of `Encode` and `Encodable` method calls used by the `DepGraph`'s
+        // `Encodable` implementation with the corresponding sequence of `Decode` and `Decodable`
+        // method calls. E.g. `Decode::read_struct` pairs with `Encode::emit_struct`, `DepNode`'s
+        // `decode` pairs with `DepNode`'s `encode`, and so on. Any decoding methods not associated
+        // with corresponding encoding methods called in `DepGraph`'s `Encodable` implementation
+        // are off limits, because we'd be relying on their implementation details.
+        //
+        // For example, because we know it happens to do the right thing, it's tempting to just use
+        // `IndexVec`'s `Decodable` implementation to decode into some of the collections below,
+        // even though `DepGraph` doesn't use its `Encodable` implementation. But the `IndexVec`
+        // implementation could change, and we'd have a bug.
+        //
+        // Variables below are explicitly typed so that anyone who changes the `SerializedDepGraph`
+        // representation without updating this function will encounter a compilation error, and
+        // know to update this and possibly the `DepGraph` `Encodable` implementation accordingly
+        // (the latter should serialize data in a format compatible with our representation).
+
+        d.read_struct("SerializedDepGraph", 4, |d| {
+            let nodes: IndexVec<SerializedDepNodeIndex, DepNode<K>> =
+                d.read_struct_field("nodes", 0, |d| {
+                    d.read_seq(|d, len| {
+                        let mut v = IndexVec::with_capacity(len);
+                        for i in 0..len {
+                            v.push(d.read_seq_elt(i, |d| Decodable::decode(d))?);
+                        }
+                        Ok(v)
+                    })
+                })?;
+
+            let fingerprints: IndexVec<SerializedDepNodeIndex, Fingerprint> =
+                d.read_struct_field("fingerprints", 1, |d| {
+                    d.read_seq(|d, len| {
+                        let mut v = IndexVec::with_capacity(len);
+                        for i in 0..len {
+                            v.push(d.read_seq_elt(i, |d| Decodable::decode(d))?);
+                        }
+                        Ok(v)
+                    })
+                })?;
+
+            let edge_list_indices: IndexVec<SerializedDepNodeIndex, (u32, u32)> = d
+                .read_struct_field("edge_list_indices", 2, |d| {
+                    d.read_seq(|d, len| {
+                        let mut v = IndexVec::with_capacity(len);
+                        for i in 0..len {
+                            v.push(d.read_seq_elt(i, |d| Decodable::decode(d))?);
+                        }
+                        Ok(v)
+                    })
+                })?;
+
+            let edge_list_data: Vec<SerializedDepNodeIndex> =
+                d.read_struct_field("edge_list_data", 3, |d| {
+                    d.read_seq(|d, len| {
+                        let mut v = Vec::with_capacity(len);
+                        for i in 0..len {
+                            v.push(d.read_seq_elt(i, |d| Decodable::decode(d))?);
+                        }
+                        Ok(v)
+                    })
+                })?;
+
+            Ok(SerializedDepGraph { nodes, fingerprints, edge_list_indices, edge_list_data })
+        })
+    }
+}
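
Note (not part of the patch): the core idea above, stripped of rustc specifics, is to stream the flat on-disk layout straight from the working data structures instead of materializing an intermediate copy, and to keep the hand-written encode and decode sides in lockstep. Below is a minimal, self-contained Rust sketch of that pattern. It uses only the standard library (no rustc_serialize), and all names here (Graph, write_graph, read_graph) are hypothetical, chosen only for illustration.

use std::io::{self, Read, Write};

/// Working representation: node ids and per-node edge lists kept in separate
/// vectors (a stand-in for `DepGraph`'s fragmented hybrid storage).
struct Graph {
    node_ids: Vec<u32>,
    edges: Vec<Vec<u32>>, // edges[i] lists the targets of node i
}

/// Encode directly from `Graph` in the flat layout the reader expects:
/// node count, then per node its id, edge count, and edge targets.
/// No intermediate "serialized graph" is allocated.
fn write_graph<W: Write>(g: &Graph, w: &mut W) -> io::Result<()> {
    w.write_all(&(g.node_ids.len() as u32).to_le_bytes())?;
    for (&id, targets) in g.node_ids.iter().zip(&g.edges) {
        w.write_all(&id.to_le_bytes())?;
        w.write_all(&(targets.len() as u32).to_le_bytes())?;
        for &t in targets {
            w.write_all(&t.to_le_bytes())?;
        }
    }
    Ok(())
}

/// The decode side mirrors the encode side call for call; if the layout
/// written above changes, this function must change with it.
fn read_graph<R: Read>(r: &mut R) -> io::Result<Graph> {
    fn read_u32<R: Read>(r: &mut R) -> io::Result<u32> {
        let mut buf = [0u8; 4];
        r.read_exact(&mut buf)?;
        Ok(u32::from_le_bytes(buf))
    }
    let node_count = read_u32(r)? as usize;
    let mut node_ids = Vec::with_capacity(node_count);
    let mut edges = Vec::with_capacity(node_count);
    for _ in 0..node_count {
        node_ids.push(read_u32(r)?);
        let edge_count = read_u32(r)? as usize;
        let mut targets = Vec::with_capacity(edge_count);
        for _ in 0..edge_count {
            targets.push(read_u32(r)?);
        }
        edges.push(targets);
    }
    Ok(Graph { node_ids, edges })
}

fn main() -> io::Result<()> {
    let g = Graph { node_ids: vec![10, 11, 12], edges: vec![vec![1], vec![0, 2], vec![]] };
    let mut bytes = Vec::new();
    write_graph(&g, &mut bytes)?;
    let g2 = read_graph(&mut bytes.as_slice())?;
    assert_eq!(g.node_ids, g2.node_ids);
    assert_eq!(g.edges, g2.edges);
    Ok(())
}

Because writing never builds a second in-memory copy of the whole graph, peak memory stays close to the size of the working representation, which is the memory win the commit message describes; the cost, as in the patch, is that the reader and writer must be maintained together by hand.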