//! the nodes representing the files and directories at the root of the
//! repository. Each node is also fixed-size, defined by the `Node` struct.
//! Nodes in turn contain slices to variable-size paths, and to their own child
//! nodes (if any) for nested files and directories.

use crate::dirstate_tree::dirstate_map::{self, DirstateMap, NodeRef};
use crate::dirstate_tree::path_with_basename::WithBasename;
use crate::errors::HgError;
use crate::utils::hg_path::HgPath;
use crate::DirstateEntry;
use crate::DirstateError;
198 read_slice::<Node>(on_disk, slice)? |
198 read_slice::<Node>(on_disk, slice)? |
199 .iter() |
199 .iter() |
200 .map(|node| { |
200 .map(|node| { |
201 Ok((node.path(on_disk)?, node.to_in_memory_node(on_disk)?)) |
201 Ok((node.path(on_disk)?, node.to_in_memory_node(on_disk)?)) |
202 }) |
202 }) |
203 .collect() |
203 .collect::<Result<_, _>>() |
|
204 .map(dirstate_map::ChildNodes::InMemory) |
204 } |
205 } |
205 |
206 |
/// Borrows the bytes designated by `slice` out of the mmapped/on-disk buffer
/// and views them as an `HgPath`.
///
/// Returns `Cow::Borrowed` (never allocates here); the `Cow` return type lets
/// callers unify this with code paths that must own the path.
/// Errors if `slice` is out of bounds for `on_disk` (via `read_slice`).
fn read_hg_path(on_disk: &[u8], slice: Slice) -> Result<Cow<HgPath>, HgError> {
    let bytes = read_slice::<u8>(on_disk, slice)?;
    Ok(Cow::Borrowed(HgPath::new(bytes)))
240 |
241 |
241 // Keep space for the header. We’ll fill it out at the end when we know the |
242 // Keep space for the header. We’ll fill it out at the end when we know the |
242 // actual offset for the root nodes. |
243 // actual offset for the root nodes. |
243 out.resize(header_len, 0_u8); |
244 out.resize(header_len, 0_u8); |
244 |
245 |
245 let root = write_nodes(&mut dirstate_map.root, &mut out)?; |
246 let root = write_nodes(dirstate_map.root.as_ref(), &mut out)?; |
246 |
247 |
247 let header = Header { |
248 let header = Header { |
248 marker: *V2_FORMAT_MARKER, |
249 marker: *V2_FORMAT_MARKER, |
249 parents: parents, |
250 parents: parents, |
250 root, |
251 root, |
256 out[..header_len].copy_from_slice(header.as_bytes()); |
257 out[..header_len].copy_from_slice(header.as_bytes()); |
257 Ok(out) |
258 Ok(out) |
258 } |
259 } |
259 |
260 |
260 fn write_nodes( |
261 fn write_nodes( |
261 nodes: &dirstate_map::ChildNodes, |
262 nodes: dirstate_map::ChildNodesRef, |
262 out: &mut Vec<u8>, |
263 out: &mut Vec<u8>, |
263 ) -> Result<ChildNodes, DirstateError> { |
264 ) -> Result<ChildNodes, DirstateError> { |
264 // `dirstate_map::ChildNodes` is a `HashMap` with undefined iteration |
265 // `dirstate_map::ChildNodes` is a `HashMap` with undefined iteration |
265 // order. Sort to enable binary search in the written file. |
266 // order. Sort to enable binary search in the written file. |
266 let nodes = dirstate_map::Node::sorted(nodes); |
267 let nodes = nodes.sorted(); |
267 |
268 |
268 // First accumulate serialized nodes in a `Vec` |
269 // First accumulate serialized nodes in a `Vec` |
269 let mut on_disk_nodes = Vec::with_capacity(nodes.len()); |
270 let mut on_disk_nodes = Vec::with_capacity(nodes.len()); |
270 for (full_path, node) in nodes { |
271 for node in nodes { |
271 on_disk_nodes.push(Node { |
272 let children = write_nodes(node.children(), out)?; |
272 children: write_nodes(&node.children, out)?, |
273 let full_path = write_slice::<u8>(node.full_path().as_bytes(), out); |
273 tracked_descendants_count: node.tracked_descendants_count.into(), |
274 let copy_source = if let Some(source) = node.copy_source() { |
274 full_path: write_slice::<u8>( |
275 write_slice::<u8>(source.as_bytes(), out) |
275 full_path.full_path().as_bytes(), |
276 } else { |
276 out, |
277 Slice { |
277 ), |
278 start: 0.into(), |
278 base_name_start: u32::try_from(full_path.base_name_start()) |
279 len: 0.into(), |
279 // Could only panic for paths over 4 GiB |
280 } |
280 .expect("dirstate-v2 offset overflow") |
281 }; |
281 .into(), |
282 on_disk_nodes.push(match node { |
282 copy_source: if let Some(source) = &node.copy_source { |
283 NodeRef::InMemory(path, node) => Node { |
283 write_slice::<u8>(source.as_bytes(), out) |
284 children, |
284 } else { |
285 copy_source, |
285 Slice { |
286 full_path, |
286 start: 0.into(), |
287 base_name_start: u32::try_from(path.base_name_start()) |
287 len: 0.into(), |
288 // Could only panic for paths over 4 GiB |
288 } |
289 .expect("dirstate-v2 offset overflow") |
289 }, |
290 .into(), |
290 entry: if let Some(entry) = &node.entry { |
291 tracked_descendants_count: node |
291 OptEntry { |
292 .tracked_descendants_count |
292 state: entry.state.into(), |
293 .into(), |
293 mode: entry.mode.into(), |
294 entry: if let Some(entry) = &node.entry { |
294 mtime: entry.mtime.into(), |
295 OptEntry { |
295 size: entry.size.into(), |
296 state: entry.state.into(), |
296 } |
297 mode: entry.mode.into(), |
297 } else { |
298 mtime: entry.mtime.into(), |
298 OptEntry { |
299 size: entry.size.into(), |
299 state: b'\0', |
300 } |
300 mode: 0.into(), |
301 } else { |
301 mtime: 0.into(), |
302 OptEntry { |
302 size: 0.into(), |
303 state: b'\0', |
303 } |
304 mode: 0.into(), |
|
305 mtime: 0.into(), |
|
306 size: 0.into(), |
|
307 } |
|
308 }, |
304 }, |
309 }, |
305 }) |
310 }) |
306 } |
311 } |
307 // … so we can write them contiguously |
312 // … so we can write them contiguously |
308 Ok(write_slice::<Node>(&on_disk_nodes, out)) |
313 Ok(write_slice::<Node>(&on_disk_nodes, out)) |