// turbopack/global_module_ids.rs

use anyhow::{Context, Result, bail};
use rustc_hash::FxHashMap;
use smallvec::SmallVec;
use tracing::Instrument;
use turbo_rcstr::RcStr;
use turbo_tasks::{ReadRef, ResolvedVc, TryJoinIterExt, ValueToString, Vc};
use turbo_tasks_hash::hash_xxh3_hash64;
use turbopack_core::{
    chunk::{
        ChunkableModule, ChunkingType, ModuleId,
        chunk_id_strategy::{ModuleIdFallback, ModuleIdStrategy},
    },
    ident::AssetIdent,
    module::Module,
    module_graph::{ModuleGraph, RefData},
};
use turbopack_ecmascript::async_chunk::module::AsyncLoaderModule;

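/// Computes a [`ModuleIdStrategy`] that assigns a numeric id to every module in the graph
/// (including the async loader modules inserted by chunking) by hashing each module's ident.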
#[turbo_tasks::function]
pub async fn get_global_module_id_strategy(
    module_graph: ResolvedVc<ModuleGraph>,
) -> Result<Vc<ModuleIdStrategy>> {
    let span = tracing::info_span!("compute module id map");
    async move {
        let module_graph = module_graph.await?;
        let graphs = &module_graph.graphs;

        // All modules in the graph
        let module_idents = graphs
            .iter()
            .flat_map(|graph| graph.iter_nodes())
            .map(|m| m.ident());

        // Additionally, all modules that are inserted by chunking (i.e. async loaders)
        let mut async_idents = vec![];
        module_graph.traverse_edges_unordered(|parent, current| {
            if let Some((
                _,
                &RefData {
                    chunking_type: ChunkingType::Async,
                    ..
                },
            )) = parent
            {
                let module = ResolvedVc::try_sidecast::<Box<dyn ChunkableModule>>(current)
                    .context("expected chunkable module for async reference")?;
                async_idents.push(AsyncLoaderModule::asset_ident_for(*module));
            }
            Ok(())
        })?;

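        // Hash every module ident string to a full 64-bit xxh3 hash; `finalize_module_ids`
        // below shortens these hashes and resolves any collisions.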
        let mut module_id_map = module_idents
            .chain(async_idents.into_iter())
            .map(|ident| async move {
                let ident = ident.to_resolved().await?;
                let ident_str = ident.to_string().await?;
                let hash = hash_xxh3_hash64(&ident_str);
                Ok((ident, (ident_str, hash)))
            })
            .try_join()
            .await?
            .into_iter()
            .collect::<FxHashMap<_, _>>();

        finalize_module_ids(&mut module_id_map);

        Ok(ModuleIdStrategy {
            module_id_map: Some(ResolvedVc::cell(
                module_id_map
                    .into_iter()
                    .map(|(ident, (_, hash))| {
                        if hash > JS_MAX_SAFE_INTEGER {
                            bail!("Numeric module id is too large: {}", hash);
                        }
                        Ok((ident, ModuleId::Number(hash)))
                    })
                    .collect::<Result<FxHashMap<_, _>>>()?,
            )),
            fallback: ModuleIdFallback::Error,
        }
        .cell())
    }
    .instrument(span)
    .await
}

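/// Module ids are emitted as JavaScript numbers, so they must not exceed
/// `Number.MAX_SAFE_INTEGER` (2^53 - 1).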
const JS_MAX_SAFE_INTEGER: u64 = (1u64 << 53) - 1;

/// Shorten hashes and handle any collisions.
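/// On return, every module's hash is unique and below the computed `digit_mask`.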
fn finalize_module_ids(
    merged_module_ids: &mut FxHashMap<ResolvedVc<AssetIdent>, (ReadRef<RcStr>, u64)>,
) {
    // 5% fill rate, as done in Webpack
    // https://github.com/webpack/webpack/blob/27cf3e59f5f289dfc4d76b7a1df2edbc4e651589/lib/ids/IdHelpers.js#L366-L405
    let optimal_range = merged_module_ids.len() * 20;
    let digit_mask = std::cmp::min(
        10u64.pow((optimal_range as f64).log10().ceil() as u32),
        JS_MAX_SAFE_INTEGER,
    );
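    // e.g. with 1,000 modules: optimal_range = 20,000, so digit_mask = 100_000 and the
    // shortened ids fit in 5 digits, keeping the fill rate at or below 5%.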

    let mut used_ids =
        FxHashMap::<u64, SmallVec<[(ResolvedVc<AssetIdent>, ReadRef<RcStr>); 1]>>::default();

    // Run in multiple passes so the result does not depend on the iteration order of
    // `merged_module_ids` (i.e. the order of imports). Hashes can still change if modules are
    // added or removed.

    // First pass: shorten hashes, potentially causing (more) collisions
    for (ident, (ident_str, full_hash)) in merged_module_ids.iter_mut() {
        let first_pass_hash = *full_hash % digit_mask;
        used_ids
            .entry(first_pass_hash)
            .or_default()
            .push((*ident, ident_str.clone()));
        *full_hash = first_pass_hash;
    }

    // Filter conflicts
    let mut conflicting_hashes = used_ids
        .iter()
        .filter(|(_, list)| list.len() > 1)
        .map(|(hash, _)| *hash)
        .collect::<Vec<_>>();
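    // Sort so that the conflict resolution below runs in a deterministic order, independent of
    // the hash map's iteration order.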
    conflicting_hashes.sort();

    // Second pass over the conflicts to resolve them
    for hash in conflicting_hashes.into_iter() {
        let list = used_ids.get_mut(&hash).unwrap();
        // Take the vector but keep the (empty) entry, so that the `contains_key` check below works
        let mut list = std::mem::take(list);
        list.sort_by(|a, b| a.1.cmp(&b.1));

        // Skip the first one: that module keeps the original hash
        for (ident, _) in list.into_iter().skip(1) {
            let hash = &mut merged_module_ids.get_mut(&ident).unwrap().1;

            // This differs from the original algorithm; that's fine since all of this now runs
            // in a deterministic order.
            let mut i = 1;
            let mut trimmed_hash;
            loop {
                // If the id is already used, find the next available hash.
                trimmed_hash = hash_xxh3_hash64((*hash, i)) % digit_mask;
                if !used_ids.contains_key(&trimmed_hash) {
                    break;
                }
                i += 1;
            }
            // At this point, we don't care about the values anymore, just the keys
            used_ids.entry(trimmed_hash).or_default();
            *hash = trimmed_hash;
        }
    }
}