1use std::{
2 collections::{VecDeque, hash_map::Entry},
3 hash::BuildHasherDefault,
4 mem::take,
5};
6
7use anyhow::{Context, Result, bail};
8use either::Either;
9use petgraph::graph::{DiGraph, EdgeIndex, NodeIndex};
10use rustc_hash::{FxHashMap, FxHashSet, FxHasher};
11use serde::{Deserialize, Serialize};
12use tracing::Instrument;
13use turbo_prehash::BuildHasherExt;
14use turbo_tasks::{
15 FxIndexMap, FxIndexSet, NonLocalValue, ResolvedVc, TaskInput, TryJoinIterExt, ValueToString,
16 Vc, trace::TraceRawVcs,
17};
18
19use crate::{
20 chunk::{ChunkableModule, ChunkingType},
21 module::Module,
22 module_graph::{
23 GraphTraversalAction, ModuleGraph, ModuleGraphRef,
24 chunk_group_info::{ChunkGroupInfo, ChunkGroupKey, RoaringBitmapWrapper},
25 module_batch::{ModuleBatch, ModuleBatchGroup, ModuleOrBatch},
26 traced_di_graph::{TracedDiGraph, iter_neighbors_rev},
27 },
28};
/// Configuration for the module batching pass.
#[turbo_tasks::value]
#[derive(Debug, Clone, Default, TaskInput, Hash)]
pub struct BatchingConfig {
    /// When set, batching decisions are made heuristically.
    /// NOTE(review): the consumer of this flag is not visible in this file —
    /// confirm its exact effect at the call sites of `compute_module_batches`.
    pub use_heuristic: bool,
}
36
#[turbo_tasks::value_impl]
impl BatchingConfig {
    /// Wraps an already-built [`BatchingConfig`] into a task cell so it can be
    /// passed around as a `Vc<BatchingConfig>`.
    #[turbo_tasks::function]
    pub fn new(config: BatchingConfig) -> Vc<Self> {
        config.cell()
    }
}
44
/// Edge payload of the [`ModuleBatchesGraph`].
#[derive(Debug, Clone, Serialize, Deserialize, TraceRawVcs, NonLocalValue)]
pub struct ModuleBatchesGraphEdge {
    /// How the target node is chunked relative to the source node.
    pub ty: ChunkingType,
    /// The concrete referenced module for this edge. `None` for the synthetic
    /// parallel references created between batches (see graph construction in
    /// `compute_module_batches`).
    pub module: Option<ResolvedVc<Box<dyn Module>>>,
}
50
51type EntriesList = FxIndexSet<ResolvedVc<Box<dyn Module>>>;
52
/// A graph over batches of modules (and single modules), derived from the
/// module graph by grouping parallel modules that share the same chunk groups.
#[turbo_tasks::value(cell = "new", eq = "manual", into = "new")]
pub struct ModuleBatchesGraph {
    /// The underlying graph. Nodes are batches or single modules; edges carry
    /// the chunking type connecting them.
    graph: TracedDiGraph<ModuleOrBatch, ModuleBatchesGraphEdge>,

    // Maps chunk-group entry modules to their node index in `graph`.
    // NodeIndex must not be exposed publicly; other APIs should accept the
    // module instead and resolve it through this map.
    #[turbo_tasks(trace_ignore)]
    entries: FxHashMap<ResolvedVc<Box<dyn Module>>, NodeIndex>,
    /// Maps each node to the batch group it belongs to, when it shares its
    /// chunk-group set with at least one other node.
    batch_groups: FxHashMap<ModuleOrBatch, ResolvedVc<ModuleBatchGroup>>,

    /// Per chunk-group (indexed like `ChunkGroupInfo::chunk_groups`): an
    /// explicit entry ordering, or `None` to fall back to the chunk group's
    /// own entry list. Only populated for merged chunk groups.
    ordered_entries: Vec<Option<EntriesList>>,
}
73
impl ModuleBatchesGraph {
    /// Resolves an entry module to its node index in the graph.
    ///
    /// Errors when `entry` is not a registered entry, listing all known
    /// entries in the message to ease debugging.
    pub async fn get_entry_index(&self, entry: ResolvedVc<Box<dyn Module>>) -> Result<NodeIndex> {
        let Some(entry) = self.entries.get(&entry) else {
            bail!(
                "Entry {} is not in graph (possible entries: {:#?})",
                entry.ident().to_string().await?,
                self.entries
                    .keys()
                    .map(|e| e.ident().to_string())
                    .try_join()
                    .await?
            );
        };
        Ok(*entry)
    }

    /// Returns the entry modules of the chunk group at `idx`, preferring the
    /// explicit ordering stored in `ordered_entries` (used for merged chunk
    /// groups) and falling back to the chunk group's own entry list.
    pub fn get_ordered_entries<'l>(
        &'l self,
        chunk_group_info: &'l ChunkGroupInfo,
        idx: usize,
    ) -> impl Iterator<Item = ResolvedVc<Box<dyn Module>>> + 'l {
        if let Some(ordered_entries) = self
            .ordered_entries
            .get(idx)
            .as_ref()
            .and_then(|o| o.as_ref())
        {
            // Sanity check: an explicit ordering must cover exactly the
            // chunk group's entries.
            if let Some(chunk_group) = chunk_group_info.chunk_groups.get_index(idx) {
                debug_assert_eq!(ordered_entries.len(), chunk_group.entries_count());
            }
            Either::Left(Either::Left(ordered_entries.iter().copied()))
        } else if let Some(chunk_group) = chunk_group_info.chunk_groups.get_index(idx) {
            Either::Right(chunk_group.entries())
        } else {
            // Out-of-range index: yield nothing.
            Either::Left(Either::Right(std::iter::empty()))
        }
    }

    /// Looks up the batch group a node belongs to, if any.
    pub fn get_batch_group(
        &self,
        module_or_batch: &ModuleOrBatch,
    ) -> Option<ResolvedVc<ModuleBatchGroup>> {
        self.batch_groups.get(module_or_batch).copied()
    }

    /// Returns the node (batch or single module) an entry module maps to.
    pub async fn get_entry(&self, entry: ResolvedVc<Box<dyn Module>>) -> Result<ModuleOrBatch> {
        let entry = self.get_entry_index(entry).await?;
        Ok(*self.graph.node_weight(entry).unwrap())
    }

    /// Iterative depth-first traversal from `entries` with pre- and post-order
    /// visitors.
    ///
    /// `visit_preorder` runs when a node is first reached via an edge (or as an
    /// entry, with `None` as parent info). Its return value controls the walk:
    /// - `Exclude`: skip the node entirely (no postorder visit either),
    /// - `Continue`: visit and expand its outgoing edges (each node is expanded
    ///   at most once, tracked by `expanded`),
    /// - any other action: visit without expanding children.
    ///
    /// `visit_postorder` runs after a node's (possibly empty) expansion.
    /// NOTE(review): a node reachable via several non-excluded edges gets a
    /// preorder and postorder call per such edge — only *expansion* is
    /// deduplicated.
    #[allow(clippy::implied_bounds_in_impls)]
    pub fn traverse_edges_from_entries_dfs<'a, S>(
        &'a self,
        entries: impl IntoIterator<
            Item = NodeIndex,
            IntoIter = impl Iterator<Item = NodeIndex> + DoubleEndedIterator,
        >,
        state: &mut S,
        mut visit_preorder: impl FnMut(
            Option<(&'a ModuleOrBatch, &'a ModuleBatchesGraphEdge)>,
            &'a ModuleOrBatch,
            &mut S,
        ) -> Result<GraphTraversalAction>,
        mut visit_postorder: impl FnMut(
            Option<(&'a ModuleOrBatch, &'a ModuleBatchesGraphEdge)>,
            &'a ModuleOrBatch,
            &mut S,
        ),
    ) -> Result<()> {
        let graph = &self.graph;

        // Two-phase stack entries: a node is first expanded (preorder +
        // children pushed), then visited again for postorder.
        enum ReverseDFSPass {
            Visit,
            ExpandAndVisit,
        }

        // Entries are pushed reversed so they are popped in original order.
        let entries = entries.into_iter();
        #[allow(clippy::type_complexity)]
        let mut stack: Vec<(ReverseDFSPass, Option<(NodeIndex, EdgeIndex)>, NodeIndex)> = entries
            .rev()
            .map(|e| (ReverseDFSPass::ExpandAndVisit, None, e))
            .collect();
        let mut expanded = FxHashSet::default();
        while let Some((pass, parent, current)) = stack.pop() {
            let parent_arg = parent.map(|(node, edge)| {
                (
                    graph.node_weight(node).unwrap(),
                    graph.edge_weight(edge).unwrap(),
                )
            });
            match pass {
                ReverseDFSPass::Visit => {
                    let current_node = graph.node_weight(current).unwrap();
                    visit_postorder(parent_arg, current_node, state);
                }
                ReverseDFSPass::ExpandAndVisit => {
                    let current_node = graph.node_weight(current).unwrap();
                    let action = visit_preorder(parent_arg, current_node, state)?;
                    if action == GraphTraversalAction::Exclude {
                        continue;
                    }
                    // Schedule the postorder visit to happen after all
                    // children pushed below have been fully processed.
                    stack.push((ReverseDFSPass::Visit, parent, current));
                    if action == GraphTraversalAction::Continue && expanded.insert(current) {
                        stack.extend(iter_neighbors_rev(graph, current).map(|(edge, child)| {
                            (ReverseDFSPass::ExpandAndVisit, Some((current, edge)), child)
                        }));
                    }
                }
            }
        }

        Ok(())
    }
}
206
207type PreBatchIndex = usize;
208
/// An item inside a [`PreBatch`], in execution order.
#[derive(Hash, PartialEq, Eq, Clone, Debug)]
enum PreBatchItem {
    /// A module included directly in this batch (parallel chunking).
    ParallelModule(ResolvedVc<Box<dyn Module>>),
    /// A parallel reference to another pre-batch by index.
    ParallelReference(PreBatchIndex),
    /// A non-parallel edge (async, isolated, shared, …) to a module that
    /// lives outside this batch.
    NonParallelEdge(ChunkingType, ResolvedVc<Box<dyn Module>>),
}
215
/// Intermediate batch representation used while computing the final
/// [`ModuleBatchesGraph`].
struct PreBatch {
    /// Ordered, deduplicated items of the batch.
    items: FxIndexSet<PreBatchItem>,
    /// Bitmap of the chunk groups this batch appears in.
    chunk_groups: RoaringBitmapWrapper,
}
220
221impl PreBatch {
222 fn new(chunk_groups: RoaringBitmapWrapper) -> Self {
223 Self {
224 items: FxIndexSet::default(),
225 chunk_groups,
226 }
227 }
228}
229
/// Mutable state threaded through the pre-batch traversal callbacks.
struct TraversalState<'l> {
    /// Items collected for the batch currently being built.
    items: Vec<PreBatchItem>,
    /// Back-reference to the owning [`PreBatches`] collection.
    this: &'l mut PreBatches,
}
234
/// Working set for the batching computation.
struct PreBatches {
    /// Modules at which a new batch must start (entries, targets of
    /// non-parallel edges, or parallel edges crossing chunk-group sets).
    boundary_modules: FxHashSet<ResolvedVc<Box<dyn Module>>>,
    /// All pre-batches created so far, addressed by [`PreBatchIndex`].
    batches: Vec<PreBatch>,
    /// Maps a boundary module to the pre-batch that starts at it.
    entries: FxHashMap<ResolvedVc<Box<dyn Module>>, PreBatchIndex>,
    /// Modules referenced only via non-parallel edges (or not chunkable),
    /// which become standalone nodes in the final graph.
    single_module_entries: FxIndexSet<ResolvedVc<Box<dyn Module>>>,
}
241
impl PreBatches {
    /// Creates an empty working set.
    fn new() -> Self {
        Self {
            boundary_modules: FxHashSet::default(),
            batches: Vec::new(),
            entries: FxHashMap::default(),
            single_module_entries: FxIndexSet::default(),
        }
    }

    /// Returns the pre-batch index for `module`, creating an empty pre-batch
    /// (and scheduling it on `queue` for later filling) on first use.
    ///
    /// Errors when `module` has no chunk-group info.
    fn ensure_pre_batch_for_module(
        &mut self,
        module: ResolvedVc<Box<dyn Module>>,
        chunk_group_info: &ChunkGroupInfo,
        queue: &mut VecDeque<(ResolvedVc<Box<dyn Module>>, PreBatchIndex)>,
    ) -> Result<PreBatchIndex> {
        Ok(match self.entries.entry(module) {
            Entry::Vacant(e) => {
                let index = self.batches.len();
                queue.push_back((module, index));
                let chunk_groups = chunk_group_info
                    .module_chunk_groups
                    .get(&module)
                    .context("all modules need to have chunk group info")?;
                let batch = PreBatch::new(chunk_groups.clone());
                self.batches.push(batch);
                e.insert(index);
                index
            }
            Entry::Occupied(e) => *e.get(),
        })
    }

    /// Walks the module graph from `entry` and collects this batch's items in
    /// postorder (dependencies before dependents).
    ///
    /// Non-parallel edges become `NonParallelEdge` items and are not followed.
    /// Parallel edges into other boundary modules become `ParallelReference`
    /// items (creating those pre-batches on demand via
    /// `ensure_pre_batch_for_module`); remaining parallel modules are inlined
    /// as `ParallelModule` items.
    async fn get_pre_batch_items(
        &mut self,
        entry: ResolvedVc<Box<dyn Module>>,
        chunk_group_info: &ChunkGroupInfo,
        module_graph: &ModuleGraphRef,
        queue: &mut VecDeque<(ResolvedVc<Box<dyn Module>>, PreBatchIndex)>,
    ) -> Result<Vec<PreBatchItem>> {
        let mut state = TraversalState {
            items: Vec::new(),
            this: self,
        };
        let mut visited = FxHashSet::default();
        module_graph.traverse_edges_from_entries_dfs(
            std::iter::once(entry),
            &mut state,
            |parent_info, node, state| {
                // The entry itself is treated as reached via a parallel edge.
                let ty = parent_info.map_or(
                    &ChunkingType::Parallel {
                        inherit_async: false,
                        hoisted: false,
                    },
                    |(_, ty)| &ty.chunking_type,
                );
                let module = node.module;
                if !ty.is_parallel() {
                    // Record the crossing edge, but don't traverse into it.
                    state.items.push(PreBatchItem::NonParallelEdge(
                        ty.without_inherit_async(),
                        module,
                    ));
                    return Ok(GraphTraversalAction::Exclude);
                }
                if visited.insert(module) {
                    // A boundary module (other than the entry) starts its own
                    // batch; reference it instead of inlining.
                    if parent_info.is_some() && state.this.boundary_modules.contains(&module) {
                        let idx = state.this.ensure_pre_batch_for_module(
                            module,
                            chunk_group_info,
                            queue,
                        )?;
                        state.items.push(PreBatchItem::ParallelReference(idx));
                        return Ok(GraphTraversalAction::Exclude);
                    }
                    Ok(GraphTraversalAction::Continue)
                } else {
                    Ok(GraphTraversalAction::Exclude)
                }
            },
            |_, node, state| {
                // Postorder: dependencies end up before their dependents.
                let item = PreBatchItem::ParallelModule(node.module);
                state.items.push(item);
                Ok(())
            },
        )?;
        Ok(state.items)
    }
}
330
331pub async fn compute_module_batches(
332 module_graph: Vc<ModuleGraph>,
333 _config: &BatchingConfig,
334) -> Result<Vc<ModuleBatchesGraph>> {
335 let outer_span = tracing::info_span!(
336 "compute module batches",
337 initial_pre_batch_items = tracing::field::Empty,
338 initial_pre_batches = tracing::field::Empty,
339 extracted_shared_items = tracing::field::Empty,
340 batches = tracing::field::Empty,
341 modules = tracing::field::Empty,
342 edges = tracing::field::Empty
343 );
344 let span = outer_span.clone();
345 async move {
346 let chunk_group_info = module_graph.chunk_group_info().await?;
347 let module_graph = module_graph.read_graphs().await?;
348
349 let mut pre_batches = PreBatches::new();
350
351 module_graph.traverse_all_edges_unordered(|(parent, ty), node| {
354 let std::collections::hash_set::Entry::Vacant(entry) =
355 pre_batches.boundary_modules.entry(node.module)
356 else {
357 return Ok(());
359 };
360 if ty.chunking_type.is_parallel() {
361 let parent_chunk_groups = chunk_group_info
362 .module_chunk_groups
363 .get(&parent.module)
364 .context("all modules need to have chunk group info")?;
365 let chunk_groups = chunk_group_info
366 .module_chunk_groups
367 .get(&node.module)
368 .context("all modules need to have chunk group info")?;
369 if parent_chunk_groups != chunk_groups {
370 entry.insert();
372 }
373 } else {
374 entry.insert();
375 }
376 Ok(())
377 })?;
378
379 for chunk_group in &chunk_group_info.chunk_groups {
381 for entry in chunk_group.entries() {
382 pre_batches.boundary_modules.insert(entry);
383 }
384 }
385
386 module_graph.traverse_cycles(
389 |ref_data| ref_data.chunking_type.is_parallel(),
390 |cycle| {
391 if cycle
392 .iter()
393 .any(|node| pre_batches.boundary_modules.contains(&node.module))
394 {
395 pre_batches
396 .boundary_modules
397 .extend(cycle.iter().map(|node| node.module));
398 }
399 },
400 )?;
401
402 let mut queue: VecDeque<(ResolvedVc<Box<dyn Module>>, PreBatchIndex)> = VecDeque::new();
403
404 let mut chunk_group_indices_with_merged_children = FxHashSet::default();
405
406 for chunk_group in &chunk_group_info.chunk_groups {
408 for entry in chunk_group.entries() {
409 pre_batches.ensure_pre_batch_for_module(entry, &chunk_group_info, &mut queue)?;
410 }
411 if let Some(parent) = chunk_group.get_merged_parent() {
412 chunk_group_indices_with_merged_children.insert(parent);
413 }
414 }
415
416 let mut initial_pre_batch_items = 0;
417 while let Some((chunkable_module, idx)) = queue.pop_front() {
419 let items = pre_batches
420 .get_pre_batch_items(
421 chunkable_module,
422 &chunk_group_info,
423 &module_graph,
424 &mut queue,
425 )
426 .await?;
427 initial_pre_batch_items += items.len();
428 let batch = &mut pre_batches.batches[idx];
429 batch.items.extend(items);
430 }
431 span.record("initial_pre_batch_items", initial_pre_batch_items);
432 span.record("initial_pre_batches", pre_batches.batches.len());
433
434 let mut ordered_entries: Vec<Option<EntriesList>> =
436 vec![None; chunk_group_info.chunk_groups.len()];
437 for (i, chunk_group) in chunk_group_info.chunk_groups.iter().enumerate() {
438 if !chunk_group_indices_with_merged_children.contains(&i) {
439 continue;
440 }
441 let mut merged_modules: FxHashMap<ChunkingType, FxIndexSet<_>> = FxHashMap::default();
442 let mut stack = ordered_entries[i]
443 .as_ref()
444 .map_or_else(
445 || Either::Left(chunk_group.entries()),
446 |v| Either::Right(v.iter().copied()),
447 )
448 .map(|module| {
449 let idx = *pre_batches.entries.get(&module).unwrap();
450 (idx, 0)
451 })
452 .collect::<Vec<_>>();
453 stack.reverse();
454 let mut visited = FxHashSet::default();
455 while let Some((idx, mut pos)) = stack.pop() {
456 let batch = &pre_batches.batches[idx];
457 while let Some(item) = batch.items.get_index(pos) {
458 match item {
459 PreBatchItem::ParallelModule(_) => {}
460 PreBatchItem::ParallelReference(other_idx) => {
461 if visited.insert(*other_idx) {
462 stack.push((idx, pos + 1));
463 stack.push((*other_idx, 0));
464 break;
465 }
466 }
467 PreBatchItem::NonParallelEdge(chunking_type, module) => {
468 if chunking_type.is_merged() {
469 merged_modules
470 .entry(chunking_type.clone())
471 .or_default()
472 .insert(*module);
473 }
474 }
475 }
476 pos += 1;
477 }
478 }
479 if !merged_modules.is_empty() {
480 for (ty, merged_modules) in merged_modules {
481 let chunk_group_key = match ty {
482 ChunkingType::Isolated {
483 merge_tag: Some(merge_tag),
484 ..
485 } => ChunkGroupKey::IsolatedMerged {
486 parent: i.into(),
487 merge_tag: merge_tag.clone(),
488 },
489 ChunkingType::Shared {
490 merge_tag: Some(merge_tag),
491 ..
492 } => ChunkGroupKey::SharedMerged {
493 parent: i.into(),
494 merge_tag: merge_tag.clone(),
495 },
496 _ => unreachable!(),
497 };
498 let idx = chunk_group_info
499 .chunk_group_keys
500 .get_index_of(&chunk_group_key)
501 .unwrap();
502 ordered_entries[idx] = Some(merged_modules);
503 }
504 }
505 }
506
507 let mut parallel_module_to_pre_batch: FxIndexMap<_, Vec<PreBatchIndex>> =
509 FxIndexMap::default();
510
511 for (idx, pre_batch) in pre_batches.batches.iter().enumerate() {
513 for item in &pre_batch.items {
514 match item {
515 PreBatchItem::ParallelModule(module) => {
516 parallel_module_to_pre_batch
517 .entry(*module)
518 .or_default()
519 .push(idx);
520 }
521 PreBatchItem::NonParallelEdge(_, module) => {
522 if !pre_batches.entries.contains_key(module) {
523 pre_batches.single_module_entries.insert(*module);
524 }
525 }
526 PreBatchItem::ParallelReference(_) => {}
527 }
528 }
529 }
530
531 let mut extracted_shared_items = 0;
534 for i in 0..parallel_module_to_pre_batch.len() {
536 let (&module, batches) = parallel_module_to_pre_batch.get_index(i).unwrap();
537 if batches.len() > 1 {
538 let batches_with_item_index = batches
540 .iter()
541 .map(|&idx| {
542 let batch_items = &pre_batches.batches[idx].items;
543 let item_idx = batch_items
544 .get_index_of(&PreBatchItem::ParallelModule(module))
545 .unwrap();
546 (idx, item_idx)
547 })
548 .collect::<Vec<_>>();
549 let mut selected_items = 1;
550 fn get_item_at(
551 pre_batches: &PreBatches,
552 batch_idx: PreBatchIndex,
553 item_idx: usize,
554 ) -> Option<&PreBatchItem> {
555 pre_batches.batches[batch_idx].items.get_index(item_idx)
556 }
557 loop {
560 if let Some(PreBatchItem::ParallelModule(next_module)) = get_item_at(
561 &pre_batches,
562 batches_with_item_index[0].0,
563 batches_with_item_index[0].1 + selected_items,
564 ) && parallel_module_to_pre_batch.get(next_module).unwrap().len()
565 == batches.len()
566 && batches_with_item_index[1..]
567 .iter()
568 .all(|&(batch_idx, item_idx)| {
569 get_item_at(&pre_batches, batch_idx, item_idx + selected_items)
570 == Some(&PreBatchItem::ParallelModule(*next_module))
571 })
572 {
573 selected_items += 1;
574 continue;
575 }
576 break;
577 }
578 extracted_shared_items += selected_items;
579
580 let exact_match = batches_with_item_index
583 .iter()
584 .find(|&&(batch_idx, item_idx)| {
585 item_idx == 0
586 && pre_batches.batches[batch_idx].items.len() == selected_items
587 });
588 if let Some(&(exact_match, _)) = exact_match {
589 for &(batch_index, item_start) in batches_with_item_index.iter() {
591 if batch_index != exact_match {
592 pre_batches.batches[batch_index].items.splice(
593 item_start..item_start + selected_items,
594 std::iter::once(PreBatchItem::ParallelReference(exact_match)),
595 );
596 }
597 }
598 for item in pre_batches.batches[exact_match].items.iter() {
599 if let PreBatchItem::ParallelModule(module) = item {
600 parallel_module_to_pre_batch
601 .get_mut(module)
602 .unwrap()
603 .clear();
604 }
605 }
606 } else {
607 let first_batch_index = batches_with_item_index[0].0;
610 let first_batch_item_index = batches_with_item_index[0].1;
611 let new_batch_index = pre_batches.batches.len();
612 let mut new_batch =
613 PreBatch::new(pre_batches.batches[first_batch_index].chunk_groups.clone());
614 new_batch
615 .items
616 .extend(pre_batches.batches[first_batch_index].items.splice(
617 first_batch_item_index..first_batch_item_index + selected_items,
618 std::iter::once(PreBatchItem::ParallelReference(new_batch_index)),
619 ));
620 for item in new_batch.items.iter() {
621 if let PreBatchItem::ParallelModule(module) = item {
622 parallel_module_to_pre_batch
623 .get_mut(module)
624 .unwrap()
625 .clear();
626 }
627 }
628 pre_batches.batches.push(new_batch);
629 for &(batch_index, item_start) in batches_with_item_index[1..].iter() {
630 pre_batches.batches[batch_index].items.splice(
631 item_start..item_start + selected_items,
632 std::iter::once(PreBatchItem::ParallelReference(new_batch_index)),
633 );
634 }
635 }
636 }
637 }
638 span.record("extracted_shared_items", extracted_shared_items);
639
640 let mut edges_count = 0;
643
644 for i in 0..pre_batches.batches.len() {
647 let items = take(&mut pre_batches.batches[i].items);
648 let mut new_items =
649 FxIndexSet::with_capacity_and_hasher(items.len(), Default::default());
650 enum Mode {
651 ParallelChunkableModule,
652 Other,
653 }
654 let mut mode = Mode::Other;
655 for item in items {
656 let chunkable_module = if let PreBatchItem::ParallelModule(module) = &item {
657 ResolvedVc::try_downcast::<Box<dyn ChunkableModule>>(*module)
658 } else {
659 None
660 };
661 let item = if let PreBatchItem::ParallelModule(module) = item {
662 if chunkable_module.is_some() {
663 PreBatchItem::ParallelModule(module)
664 } else {
665 pre_batches.single_module_entries.insert(module);
666 PreBatchItem::NonParallelEdge(
667 ChunkingType::Parallel {
668 inherit_async: false,
669 hoisted: false,
670 },
671 module,
672 )
673 }
674 } else {
675 item
676 };
677 match (&mode, chunkable_module) {
678 (_, Some(_)) => {
679 mode = Mode::ParallelChunkableModule;
680 new_items.insert(item);
681 }
682 (Mode::Other, _) => {
683 edges_count += 1;
684 new_items.insert(item);
685 }
686 (Mode::ParallelChunkableModule, _) => {
687 let idx = pre_batches.batches.len();
689 let mut new_batch =
690 PreBatch::new(pre_batches.batches[i].chunk_groups.clone());
691 new_batch.items.extend(new_items.drain(..));
692 pre_batches.batches.push(new_batch);
693 edges_count += 1;
694 new_items.insert(PreBatchItem::ParallelReference(idx));
695 if chunkable_module.is_some() {
696 new_items.insert(item);
697 } else {
698 edges_count += 1;
699 mode = Mode::Other;
700 new_items.insert(item);
701 }
702 }
703 }
704 }
705 pre_batches.batches[i].items = new_items;
706 }
707 span.record("pre_batches", pre_batches.batches.len());
708
709 let mut graph: DiGraph<ModuleOrBatch, ModuleBatchesGraphEdge, u32> =
713 petgraph::graph::DiGraph::with_capacity(
714 pre_batches.batches.len() + pre_batches.single_module_entries.len(),
715 edges_count,
716 );
717
718 let batches = pre_batches
720 .batches
721 .iter_mut()
722 .enumerate()
723 .map(async |(i, pre_batch)| {
724 let mut modules = pre_batch.items.iter().filter_map(|item| {
725 if let PreBatchItem::ParallelModule(module) = item {
726 ResolvedVc::try_downcast(*module)
727 } else {
728 None
729 }
730 });
731 let Some(first) = modules.next() else {
732 return Ok(ModuleOrBatch::None(i));
733 };
734 if let Some(second) = modules.next() {
735 let batch = ModuleBatch::new(
736 [first, second]
737 .into_iter()
738 .chain(modules)
739 .map(|m| *m)
740 .collect::<Vec<_>>(),
741 Some(pre_batch.chunk_groups.clone()),
742 );
743 Ok(ModuleOrBatch::Batch(batch.to_resolved().await?))
744 } else {
745 Ok(ModuleOrBatch::Module(ResolvedVc::upcast(first)))
746 }
747 })
748 .try_join()
749 .await?;
750
751 let mut batch_groups: FxHashMap<_, Vec<_>> = FxHashMap::default();
753 for (i, pre_batch) in pre_batches.batches.iter().enumerate() {
754 let key = BuildHasherDefault::<FxHasher>::default().prehash(&pre_batch.chunk_groups);
755 let batch = batches[i];
756 batch_groups.entry(key).or_default().push(batch);
757 }
758 for &module in &pre_batches.single_module_entries {
759 let chunk_groups = chunk_group_info
760 .module_chunk_groups
761 .get(&module)
762 .context("all modules need to have chunk group info")?;
763 let key = BuildHasherDefault::<FxHasher>::default().prehash(chunk_groups);
764 batch_groups
765 .entry(key)
766 .or_default()
767 .push(ModuleOrBatch::Module(module));
768 }
769
770 let batch_groups = batch_groups
772 .into_iter()
773 .map(async |(key, items)| {
774 if items.len() == 1 {
775 Ok(Either::Left(std::iter::empty()))
776 } else {
777 let batch_group = ModuleBatchGroup::new(items.clone(), (*key).clone())
778 .to_resolved()
779 .await?;
780 Ok(Either::Right(
781 items.into_iter().map(move |item| (item, batch_group)),
782 ))
783 }
784 })
785 .try_join()
786 .await?
787 .into_iter()
788 .flatten()
789 .collect::<FxHashMap<_, _>>();
790
791 let mut batches_count = 0;
793 let mut modules_count = 0;
794 let batch_indices = batches
795 .into_iter()
796 .map(|batch| {
797 match &batch {
798 ModuleOrBatch::Batch(_) => batches_count += 1,
799 ModuleOrBatch::Module(_) => modules_count += 1,
800 ModuleOrBatch::None(_) => {}
801 }
802 graph.add_node(batch)
803 })
804 .collect::<Vec<_>>();
805
806 let single_module_indices = pre_batches
808 .single_module_entries
809 .iter()
810 .map(|module| graph.add_node(ModuleOrBatch::Module(*module)))
811 .collect::<Vec<_>>();
812
813 span.record("batches", batches_count);
814 modules_count += pre_batches.single_module_entries.len();
815 span.record("modules", modules_count);
816 span.record("edges", edges_count);
817
818 for (i, pre_batch) in pre_batches.batches.into_iter().enumerate() {
820 let index = batch_indices[i];
821 let items = pre_batch.items;
822 for item in items {
823 match item {
824 PreBatchItem::ParallelReference(idx) => {
825 graph.add_edge(
826 index,
827 batch_indices[idx],
828 ModuleBatchesGraphEdge {
829 ty: ChunkingType::Parallel {
830 inherit_async: false,
831 hoisted: false,
832 },
833 module: None,
834 },
835 );
836 }
837 PreBatchItem::NonParallelEdge(ty, module) => {
838 if let Some(batch) = pre_batches.entries.get(&module).copied() {
839 graph.add_edge(
840 index,
841 batch_indices[batch],
842 ModuleBatchesGraphEdge {
843 ty,
844 module: Some(module),
845 },
846 );
847 continue;
848 }
849 let idx = pre_batches
850 .single_module_entries
851 .get_index_of(&module)
852 .unwrap();
853 let idx = single_module_indices[idx];
854 graph.add_edge(
855 index,
856 idx,
857 ModuleBatchesGraphEdge {
858 ty,
859 module: Some(module),
860 },
861 );
862 }
863 PreBatchItem::ParallelModule(_) => {}
864 }
865 }
866 }
867
868 debug_assert_eq!(graph.capacity().0, graph.node_count());
869 debug_assert_eq!(graph.capacity().1, graph.edge_count());
870
871 let mut entries = FxHashMap::default();
873 for chunk_group in &chunk_group_info.chunk_groups {
874 for module in chunk_group.entries() {
875 if let Some(batch) = pre_batches.entries.get(&module).copied() {
876 entries.insert(module, batch_indices[batch]);
877 continue;
878 }
879 let idx = pre_batches
880 .single_module_entries
881 .get_index_of(&module)
882 .unwrap();
883 let idx = single_module_indices[idx];
884 entries.insert(module, idx);
885 }
886 }
887
888 Ok(ModuleBatchesGraph {
889 graph: TracedDiGraph(graph),
890 entries,
891 batch_groups,
892 ordered_entries,
893 }
894 .cell())
895 }
896 .instrument(outer_span)
897 .await
898}