next_swc_napi/next_api/
project.rs

1use std::{borrow::Cow, io::Write, path::PathBuf, sync::Arc, thread, time::Duration};
2
3use anyhow::{Context, Result, anyhow, bail};
4use flate2::write::GzEncoder;
5use futures_util::TryFutureExt;
6use napi::{
7    Env, JsFunction, JsObject, Status,
8    bindgen_prelude::{External, within_runtime_if_available},
9    threadsafe_function::{ThreadsafeFunction, ThreadsafeFunctionCallMode},
10};
11use next_api::{
12    entrypoints::Entrypoints,
13    next_server_nft::next_server_nft_assets,
14    operation::{
15        EntrypointsOperation, InstrumentationOperation, MiddlewareOperation, OptionEndpoint,
16        RouteOperation,
17    },
18    project::{
19        DefineEnv, DraftModeOptions, PartialProjectOptions, Project, ProjectContainer,
20        ProjectOptions, WatchOptions,
21    },
22    route::Endpoint,
23};
24use next_core::tracing_presets::{
25    TRACING_NEXT_OVERVIEW_TARGETS, TRACING_NEXT_TARGETS, TRACING_NEXT_TURBO_TASKS_TARGETS,
26    TRACING_NEXT_TURBOPACK_TARGETS,
27};
28use once_cell::sync::Lazy;
29use rand::Rng;
30use serde::{Deserialize, Serialize};
31use tokio::{io::AsyncWriteExt, runtime::Handle, time::Instant};
32use tracing::Instrument;
33use tracing_subscriber::{Registry, layer::SubscriberExt, util::SubscriberInitExt};
34use turbo_rcstr::{RcStr, rcstr};
35use turbo_tasks::{
36    Effects, FxIndexSet, NonLocalValue, OperationValue, OperationVc, ReadRef, ResolvedVc,
37    TaskInput, TransientInstance, TryJoinIterExt, TurboTasksApi, UpdateInfo, Vc, get_effects,
38    message_queue::{CompilationEvent, Severity},
39    trace::TraceRawVcs,
40};
41use turbo_tasks_backend::{BackingStorage, db_invalidation::invalidation_reasons};
42use turbo_tasks_fs::{
43    DiskFileSystem, FileContent, FileSystem, FileSystemPath, util::uri_from_file,
44};
45use turbo_unix_path::{get_relative_path_to, sys_to_unix};
46use turbopack_core::{
47    PROJECT_FILESYSTEM_NAME, SOURCE_URL_PROTOCOL,
48    diagnostics::PlainDiagnostic,
49    error::PrettyPrintError,
50    issue::PlainIssue,
51    output::{OutputAsset, OutputAssets},
52    source_map::{OptionStringifiedSourceMap, SourceMap, Token},
53    version::{PartialUpdate, TotalUpdate, Update, VersionState},
54};
55use turbopack_ecmascript_hmr_protocol::{ClientUpdateInstruction, Issue, ResourceIdentifier};
56use turbopack_trace_utils::{
57    exit::{ExitHandler, ExitReceiver},
58    filter_layer::FilterLayer,
59    raw_trace::RawTraceLayer,
60    trace_writer::TraceWriter,
61};
62use url::Url;
63
64use crate::{
65    next_api::{
66        endpoint::ExternalEndpoint,
67        turbopack_ctx::{
68            NapiNextTurbopackCallbacks, NapiNextTurbopackCallbacksJsObject, NextTurboTasks,
69            NextTurbopackContext, create_turbo_tasks,
70        },
71        utils::{
72            DetachedVc, NapiDiagnostic, NapiIssue, RootTask, TurbopackResult, get_diagnostics,
73            get_issues, strongly_consistent_catch_collectables, subscribe,
74        },
75    },
76    util::DhatProfilerGuard,
77};
78
79/// Used by [`benchmark_file_io`]. This is a noisy benchmark, so set the
80/// threshold high.
81const SLOW_FILESYSTEM_THRESHOLD: Duration = Duration::from_millis(100);
82static SOURCE_MAP_PREFIX: Lazy<String> = Lazy::new(|| format!("{SOURCE_URL_PROTOCOL}///"));
83static SOURCE_MAP_PREFIX_PROJECT: Lazy<String> =
84    Lazy::new(|| format!("{SOURCE_URL_PROTOCOL}///[{PROJECT_FILESYSTEM_NAME}]/"));
85
86#[napi(object)]
87#[derive(Clone, Debug)]
88pub struct NapiEnvVar {
89    pub name: RcStr,
90    pub value: RcStr,
91}
92
93#[napi(object)]
94#[derive(Clone, Debug)]
95pub struct NapiOptionEnvVar {
96    pub name: RcStr,
97    pub value: Option<RcStr>,
98}
99
100#[napi(object)]
101pub struct NapiDraftModeOptions {
102    pub preview_mode_id: RcStr,
103    pub preview_mode_encryption_key: RcStr,
104    pub preview_mode_signing_key: RcStr,
105}
106
107impl From<NapiDraftModeOptions> for DraftModeOptions {
108    fn from(val: NapiDraftModeOptions) -> Self {
109        DraftModeOptions {
110            preview_mode_id: val.preview_mode_id,
111            preview_mode_encryption_key: val.preview_mode_encryption_key,
112            preview_mode_signing_key: val.preview_mode_signing_key,
113        }
114    }
115}
116
117#[napi(object)]
118pub struct NapiWatchOptions {
119    /// Whether to watch the filesystem for file changes.
120    pub enable: bool,
121
122    /// Enable polling at a certain interval if the native file watching doesn't work (e.g.
123    /// docker).
124    pub poll_interval_ms: Option<f64>,
125}
126
127#[napi(object)]
128pub struct NapiProjectOptions {
129    /// An absolute root path (Unix or Windows path) from which all files must be nested under.
130    /// Trying to access a file outside this root will fail, so think of this as a chroot.
131    /// E.g. `/home/user/projects/my-repo`.
132    pub root_path: RcStr,
133
134    /// A path which contains the app/pages directories, relative to [`Project::root_path`], always
135    /// Unix path. E.g. `apps/my-app`
136    pub project_path: RcStr,
137
138    /// A path where to emit the build outputs, relative to [`Project::project_path`], always Unix
139    /// path. Corresponds to next.config.js's `distDir`.
140    /// E.g. `.next`
141    pub dist_dir: RcStr,
142
143    /// Filesystem watcher options.
144    pub watch: NapiWatchOptions,
145
146    /// The contents of next.config.js, serialized to JSON.
147    pub next_config: RcStr,
148
149    /// A map of environment variables to use when compiling code.
150    pub env: Vec<NapiEnvVar>,
151
152    /// A map of environment variables which should get injected at compile
153    /// time.
154    pub define_env: NapiDefineEnv,
155
156    /// The mode in which Next.js is running.
157    pub dev: bool,
158
159    /// The server actions encryption key.
160    pub encryption_key: RcStr,
161
162    /// The build id.
163    pub build_id: RcStr,
164
165    /// Options for draft mode.
166    pub preview_props: NapiDraftModeOptions,
167
168    /// The browserslist query to use for targeting browsers.
169    pub browserslist_query: RcStr,
170
171    /// When the code is minified, this opts out of the default mangling of
172    /// local names for variables, functions etc., which can be useful for
173    /// debugging/profiling purposes.
174    pub no_mangling: bool,
175
176    /// The version of Node.js that is available/currently running.
177    pub current_node_js_version: RcStr,
178}
179
180/// [NapiProjectOptions] with all fields optional.
181#[napi(object)]
182pub struct NapiPartialProjectOptions {
183    /// An absolute root path  (Unix or Windows path) from which all files must be nested under.
184    /// Trying to access a file outside this root will fail, so think of this as a chroot.
185    /// E.g. `/home/user/projects/my-repo`.
186    pub root_path: Option<RcStr>,
187
188    /// A path which contains the app/pages directories, relative to [`Project::root_path`], always
189    /// a Unix path.
190    /// E.g. `apps/my-app`
191    pub project_path: Option<RcStr>,
192
193    /// A path where to emit the build outputs, relative to [`Project::project_path`], always a
194    /// Unix path. Corresponds to next.config.js's `distDir`.
195    /// E.g. `.next`
196    pub dist_dir: Option<Option<RcStr>>,
197
198    /// Filesystem watcher options.
199    pub watch: Option<NapiWatchOptions>,
200
201    /// The contents of next.config.js, serialized to JSON.
202    pub next_config: Option<RcStr>,
203
204    /// A map of environment variables to use when compiling code.
205    pub env: Option<Vec<NapiEnvVar>>,
206
207    /// A map of environment variables which should get injected at compile
208    /// time.
209    pub define_env: Option<NapiDefineEnv>,
210
211    /// The mode in which Next.js is running.
212    pub dev: Option<bool>,
213
214    /// The server actions encryption key.
215    pub encryption_key: Option<RcStr>,
216
217    /// The build id.
218    pub build_id: Option<RcStr>,
219
220    /// Options for draft mode.
221    pub preview_props: Option<NapiDraftModeOptions>,
222
223    /// The browserslist query to use for targeting browsers.
224    pub browserslist_query: Option<RcStr>,
225
226    /// When the code is minified, this opts out of the default mangling of
227    /// local names for variables, functions etc., which can be useful for
228    /// debugging/profiling purposes.
229    pub no_mangling: Option<bool>,
230}
231
232#[napi(object)]
233#[derive(Clone, Debug)]
234pub struct NapiDefineEnv {
235    pub client: Vec<NapiOptionEnvVar>,
236    pub edge: Vec<NapiOptionEnvVar>,
237    pub nodejs: Vec<NapiOptionEnvVar>,
238}
239
240#[napi(object)]
241pub struct NapiTurboEngineOptions {
242    /// Use the new backend with filesystem cache enabled.
243    pub persistent_caching: Option<bool>,
244    /// An upper bound of memory that turbopack will attempt to stay under.
245    pub memory_limit: Option<f64>,
246    /// Track dependencies between tasks. If false, any change during build will error.
247    pub dependency_tracking: Option<bool>,
248    /// Whether the project is running in a CI environment.
249    pub is_ci: Option<bool>,
250    /// Whether the project is running in a short session.
251    pub is_short_session: Option<bool>,
252}
253
254impl From<NapiWatchOptions> for WatchOptions {
255    fn from(val: NapiWatchOptions) -> Self {
256        WatchOptions {
257            enable: val.enable,
258            poll_interval: val
259                .poll_interval_ms
260                .filter(|interval| !interval.is_nan() && interval.is_finite() && *interval > 0.0)
261                .map(|interval| Duration::from_secs_f64(interval / 1000.0)),
262        }
263    }
264}
265
266impl From<NapiProjectOptions> for ProjectOptions {
267    fn from(val: NapiProjectOptions) -> Self {
268        ProjectOptions {
269            root_path: val.root_path,
270            project_path: val.project_path,
271            watch: val.watch.into(),
272            next_config: val.next_config,
273            env: val
274                .env
275                .into_iter()
276                .map(|var| (var.name, var.value))
277                .collect(),
278            define_env: val.define_env.into(),
279            dev: val.dev,
280            encryption_key: val.encryption_key,
281            build_id: val.build_id,
282            preview_props: val.preview_props.into(),
283            browserslist_query: val.browserslist_query,
284            no_mangling: val.no_mangling,
285            current_node_js_version: val.current_node_js_version,
286        }
287    }
288}
289
290impl From<NapiPartialProjectOptions> for PartialProjectOptions {
291    fn from(val: NapiPartialProjectOptions) -> Self {
292        PartialProjectOptions {
293            root_path: val.root_path,
294            project_path: val.project_path,
295            watch: val.watch.map(From::from),
296            next_config: val.next_config,
297            env: val
298                .env
299                .map(|env| env.into_iter().map(|var| (var.name, var.value)).collect()),
300            define_env: val.define_env.map(|env| env.into()),
301            dev: val.dev,
302            encryption_key: val.encryption_key,
303            build_id: val.build_id,
304            preview_props: val.preview_props.map(|props| props.into()),
305        }
306    }
307}
308
309impl From<NapiDefineEnv> for DefineEnv {
310    fn from(val: NapiDefineEnv) -> Self {
311        DefineEnv {
312            client: val
313                .client
314                .into_iter()
315                .map(|var| (var.name, var.value))
316                .collect(),
317            edge: val
318                .edge
319                .into_iter()
320                .map(|var| (var.name, var.value))
321                .collect(),
322            nodejs: val
323                .nodejs
324                .into_iter()
325                .map(|var| (var.name, var.value))
326                .collect(),
327        }
328    }
329}
330
331pub struct ProjectInstance {
332    turbopack_ctx: NextTurbopackContext,
333    container: ResolvedVc<ProjectContainer>,
334    exit_receiver: tokio::sync::Mutex<Option<ExitReceiver>>,
335}
336
337#[napi(ts_return_type = "Promise<{ __napiType: \"Project\" }>")]
338pub fn project_new(
339    env: Env,
340    options: NapiProjectOptions,
341    turbo_engine_options: NapiTurboEngineOptions,
342    napi_callbacks: NapiNextTurbopackCallbacksJsObject,
343) -> napi::Result<JsObject> {
344    let napi_callbacks = NapiNextTurbopackCallbacks::from_js(napi_callbacks)?;
345    let (exit, exit_receiver) = ExitHandler::new_receiver();
346
347    if let Some(dhat_profiler) = DhatProfilerGuard::try_init() {
348        exit.on_exit(async move {
349            tokio::task::spawn_blocking(move || drop(dhat_profiler))
350                .await
351                .unwrap()
352        });
353    }
354
355    let mut trace = std::env::var("NEXT_TURBOPACK_TRACING")
356        .ok()
357        .filter(|v| !v.is_empty());
358
359    if cfg!(feature = "tokio-console") && trace.is_none() {
360        // ensure `trace` is set to *something* so that the `tokio-console` feature works,
361        // otherwise you just get empty output from `tokio-console`, which can be
362        // confusing.
363        trace = Some("overview".to_owned());
364    }
365
366    enum Compression {
367        None,
368        GzipFast,
369        GzipBest,
370    }
371    let mut compress = Compression::None;
372    if let Some(mut trace) = trace {
373        println!("Turbopack tracing enabled with targets: {trace}");
374        println!("  Note that this might have a small performance impact.");
375
376        trace = trace
377            .split(",")
378            .filter_map(|item| {
379                // Trace presets
380                Some(match item {
381                    "overview" | "1" => Cow::Owned(TRACING_NEXT_OVERVIEW_TARGETS.join(",")),
382                    "next" => Cow::Owned(TRACING_NEXT_TARGETS.join(",")),
383                    "turbopack" => Cow::Owned(TRACING_NEXT_TURBOPACK_TARGETS.join(",")),
384                    "turbo-tasks" => Cow::Owned(TRACING_NEXT_TURBO_TASKS_TARGETS.join(",")),
385                    "gz" => {
386                        compress = Compression::GzipFast;
387                        return None;
388                    }
389                    "gz-best" => {
390                        compress = Compression::GzipBest;
391                        return None;
392                    }
393                    _ => Cow::Borrowed(item),
394                })
395            })
396            .intersperse_with(|| Cow::Borrowed(","))
397            .collect::<String>();
398
399        let subscriber = Registry::default();
400
401        if cfg!(feature = "tokio-console") {
402            trace = format!("{trace},tokio=trace,runtime=trace");
403        }
404        #[cfg(feature = "tokio-console")]
405        let subscriber = subscriber.with(console_subscriber::spawn());
406
407        let subscriber = subscriber.with(FilterLayer::try_new(&trace).unwrap());
408
409        let internal_dir = PathBuf::from(&options.root_path)
410            .join(&options.project_path)
411            .join(&options.dist_dir);
412        std::fs::create_dir_all(&internal_dir)
413            .context("Unable to create .next directory")
414            .unwrap();
415        let trace_file;
416        let (trace_writer, trace_writer_guard) = match compress {
417            Compression::None => {
418                trace_file = internal_dir.join("trace-turbopack");
419                let trace_writer = std::fs::File::create(trace_file.clone()).unwrap();
420                TraceWriter::new(trace_writer)
421            }
422            Compression::GzipFast => {
423                trace_file = internal_dir.join("trace-turbopack");
424                let trace_writer = std::fs::File::create(trace_file.clone()).unwrap();
425                let trace_writer = GzEncoder::new(trace_writer, flate2::Compression::fast());
426                TraceWriter::new(trace_writer)
427            }
428            Compression::GzipBest => {
429                trace_file = internal_dir.join("trace-turbopack");
430                let trace_writer = std::fs::File::create(trace_file.clone()).unwrap();
431                let trace_writer = GzEncoder::new(trace_writer, flate2::Compression::best());
432                TraceWriter::new(trace_writer)
433            }
434        };
435        let subscriber = subscriber.with(RawTraceLayer::new(trace_writer));
436
437        exit.on_exit(async move {
438            tokio::task::spawn_blocking(move || drop(trace_writer_guard))
439                .await
440                .unwrap();
441        });
442
443        let trace_server = std::env::var("NEXT_TURBOPACK_TRACE_SERVER").ok();
444        if trace_server.is_some() {
445            thread::spawn(move || {
446                turbopack_trace_server::start_turbopack_trace_server(trace_file, None);
447            });
448            println!("Turbopack trace server started. View trace at https://trace.nextjs.org");
449        }
450
451        subscriber.init();
452    }
453
454    env.spawn_future(
455        async move {
456            let memory_limit = turbo_engine_options
457                .memory_limit
458                .map(|m| m as usize)
459                .unwrap_or(usize::MAX);
460            let persistent_caching = turbo_engine_options.persistent_caching.unwrap_or_default();
461            let dependency_tracking = turbo_engine_options.dependency_tracking.unwrap_or(true);
462            let is_ci = turbo_engine_options.is_ci.unwrap_or(false);
463            let is_short_session = turbo_engine_options.is_short_session.unwrap_or(false);
464            let turbo_tasks = create_turbo_tasks(
465                PathBuf::from(&options.dist_dir),
466                persistent_caching,
467                memory_limit,
468                dependency_tracking,
469                is_ci,
470                is_short_session,
471            )?;
472            let turbopack_ctx = NextTurbopackContext::new(turbo_tasks.clone(), napi_callbacks);
473
474            if let Some(stats_path) = std::env::var_os("NEXT_TURBOPACK_TASK_STATISTICS") {
475                let task_stats = turbo_tasks.task_statistics().enable().clone();
476                exit.on_exit(async move {
477                    tokio::task::spawn_blocking(move || {
478                        let mut file = std::fs::File::create(&stats_path)
479                            .with_context(|| format!("failed to create or open {stats_path:?}"))?;
480                        serde_json::to_writer(&file, &task_stats)
481                            .context("failed to serialize or write task statistics")?;
482                        file.flush().context("failed to flush file")
483                    })
484                    .await
485                    .unwrap()
486                    .unwrap();
487                });
488            }
489
490            let options: ProjectOptions = options.into();
491            let is_dev = options.dev;
492            let container = turbo_tasks
493                .run(async move {
494                    let project = ProjectContainer::new(rcstr!("next.js"), is_dev);
495                    let project = project.to_resolved().await?;
496                    project.initialize(options).await?;
497                    Ok(project)
498                })
499                .or_else(|e| turbopack_ctx.throw_turbopack_internal_result(&e.into()))
500                .await?;
501
502            if is_dev {
503                Handle::current().spawn({
504                    let tt = turbo_tasks.clone();
505                    async move {
506                        let result = tt
507                            .clone()
508                            .run(async move {
509                                benchmark_file_io(
510                                    tt,
511                                    container.project().node_root().owned().await?,
512                                )
513                                .await
514                            })
515                            .await;
516                        if let Err(err) = result {
517                            // TODO Not ideal to print directly to stdout.
518                            // We should use a compilation event instead to report async errors.
519                            println!("Failed to benchmark file I/O: {err}");
520                        }
521                    }
522                    .instrument(tracing::info_span!("benchmark file I/O"))
523                });
524            }
525
526            Ok(External::new(ProjectInstance {
527                turbopack_ctx,
528                container,
529                exit_receiver: tokio::sync::Mutex::new(Some(exit_receiver)),
530            }))
531        }
532        .instrument(tracing::info_span!("create project")),
533    )
534}
535
536#[derive(Debug, Clone, Serialize)]
537struct SlowFilesystemEvent {
538    directory: String,
539    duration_ms: u128,
540}
541
542impl CompilationEvent for SlowFilesystemEvent {
543    fn type_name(&self) -> &'static str {
544        "SlowFilesystemEvent"
545    }
546
547    fn severity(&self) -> Severity {
548        Severity::Warning
549    }
550
551    fn message(&self) -> String {
552        format!(
553            "Slow filesystem detected. The benchmark took {}ms. If {} is a network drive, \
554             consider moving it to a local folder. If you have an antivirus enabled, consider \
555             excluding your project directory.",
556            self.duration_ms, self.directory
557        )
558    }
559
560    fn to_json(&self) -> String {
561        serde_json::to_string(self).unwrap()
562    }
563}
564
565/// A very simple and low-overhead, but potentially noisy benchmark to detect
566/// very slow disk IO. Warns the user (via `println!`) if the benchmark takes
567/// more than `SLOW_FILESYSTEM_THRESHOLD`.
568///
569/// This idea is copied from Bun:
570/// - https://x.com/jarredsumner/status/1637549427677364224
571/// - https://github.com/oven-sh/bun/blob/06a9aa80c38b08b3148bfeabe560/src/install/install.zig#L3038
572async fn benchmark_file_io(turbo_tasks: NextTurboTasks, directory: FileSystemPath) -> Result<()> {
573    // try to get the real file path on disk so that we can use it with tokio
574    let fs = ResolvedVc::try_downcast_type::<DiskFileSystem>(directory.fs)
575        .context(anyhow!(
576            "expected node_root to be a DiskFileSystem, cannot benchmark"
577        ))?
578        .await?;
579
580    let directory = fs.to_sys_path(&directory);
581    let temp_path = directory.join(format!(
582        "tmp_file_io_benchmark_{:x}",
583        rand::random::<u128>()
584    ));
585
586    let mut random_buffer = [0u8; 512];
587    rand::rng().fill(&mut random_buffer[..]);
588
589    // perform IO directly with tokio (skipping `tokio_tasks_fs`) to avoid the
590    // additional noise/overhead of tasks caching, invalidation, file locks,
591    // etc.
592    let start = Instant::now();
593    async {
594        for _ in 0..3 {
595            // create a new empty file
596            let mut file = tokio::fs::File::create(&temp_path).await?;
597            file.write_all(&random_buffer).await?;
598            file.sync_all().await?;
599            drop(file);
600
601            // remove the file
602            tokio::fs::remove_file(&temp_path).await?;
603        }
604        anyhow::Ok(())
605    }
606    .instrument(tracing::info_span!("benchmark file IO (measurement)", path = %temp_path.display()))
607    .await?;
608
609    let duration = Instant::now().duration_since(start);
610    if duration > SLOW_FILESYSTEM_THRESHOLD {
611        turbo_tasks.send_compilation_event(Arc::new(SlowFilesystemEvent {
612            directory: directory.to_string_lossy().into(),
613            duration_ms: duration.as_millis(),
614        }));
615    }
616
617    Ok(())
618}
619
620#[tracing::instrument(level = "info", name = "update project", skip_all)]
621#[napi]
622pub async fn project_update(
623    #[napi(ts_arg_type = "{ __napiType: \"Project\" }")] project: External<ProjectInstance>,
624    options: NapiPartialProjectOptions,
625) -> napi::Result<()> {
626    let ctx = &project.turbopack_ctx;
627    let options = options.into();
628    let container = project.container;
629    ctx.turbo_tasks()
630        .run(async move {
631            container.update(options).await?;
632            Ok(())
633        })
634        .or_else(|e| ctx.throw_turbopack_internal_result(&e.into()))
635        .await
636}
637
638/// Invalidates the filesystem cache so that it will be deleted next time that a turbopack project
639/// is created with filesystem cache enabled.
640#[napi]
641pub async fn project_invalidate_file_system_cache(
642    #[napi(ts_arg_type = "{ __napiType: \"Project\" }")] project: External<ProjectInstance>,
643) -> napi::Result<()> {
644    tokio::task::spawn_blocking(move || {
645        // TODO: Let the JS caller specify a reason? We need to limit the reasons to ones we know
646        // how to generate a message for on the Rust side of the FFI.
647        project
648            .turbopack_ctx
649            .turbo_tasks()
650            .backend()
651            .backing_storage()
652            .invalidate(invalidation_reasons::USER_REQUEST)
653    })
654    .await
655    .context("panicked while invalidating filesystem cache")??;
656    Ok(())
657}
658
659/// Runs exit handlers for the project registered using the [`ExitHandler`] API.
660///
661/// This is called by `project_shutdown`, so if you're calling that API, you shouldn't call this
662/// one.
663#[napi]
664pub async fn project_on_exit(
665    #[napi(ts_arg_type = "{ __napiType: \"Project\" }")] project: External<ProjectInstance>,
666) {
667    project_on_exit_internal(&project).await
668}
669
670async fn project_on_exit_internal(project: &ProjectInstance) {
671    let exit_receiver = project.exit_receiver.lock().await.take();
672    exit_receiver
673        .expect("`project.onExitSync` must only be called once")
674        .run_exit_handler()
675        .await;
676}
677
678/// Runs `project_on_exit`, and then waits for turbo_tasks to gracefully shut down.
679///
680/// This is used in builds where it's important that we completely persist turbo-tasks to disk, but
681/// it's skipped in the development server (`project_on_exit` is used instead with a short timeout),
682/// where we prioritize fast exit and user responsiveness over all else.
683#[tracing::instrument(level = "info", name = "shutdown project", skip_all)]
684#[napi]
685pub async fn project_shutdown(
686    #[napi(ts_arg_type = "{ __napiType: \"Project\" }")] project: External<ProjectInstance>,
687) {
688    project.turbopack_ctx.turbo_tasks().stop_and_wait().await;
689    project_on_exit_internal(&project).await;
690}
691
692#[napi(object)]
693#[derive(Default)]
694pub struct AppPageNapiRoute {
695    /// The relative path from project_path to the route file
696    pub original_name: Option<RcStr>,
697
698    pub html_endpoint: Option<External<ExternalEndpoint>>,
699    pub rsc_endpoint: Option<External<ExternalEndpoint>>,
700}
701
702#[napi(object)]
703#[derive(Default)]
704pub struct NapiRoute {
705    /// The router path
706    pub pathname: String,
707    /// The relative path from project_path to the route file
708    pub original_name: Option<RcStr>,
709
710    /// The type of route, eg a Page or App
711    pub r#type: &'static str,
712
713    pub pages: Option<Vec<AppPageNapiRoute>>,
714
715    // Different representations of the endpoint
716    pub endpoint: Option<External<ExternalEndpoint>>,
717    pub html_endpoint: Option<External<ExternalEndpoint>>,
718    pub rsc_endpoint: Option<External<ExternalEndpoint>>,
719    pub data_endpoint: Option<External<ExternalEndpoint>>,
720}
721
722impl NapiRoute {
723    fn from_route(
724        pathname: String,
725        value: RouteOperation,
726        turbopack_ctx: &NextTurbopackContext,
727    ) -> Self {
728        let convert_endpoint = |endpoint: OperationVc<OptionEndpoint>| {
729            Some(External::new(ExternalEndpoint(DetachedVc::new(
730                turbopack_ctx.clone(),
731                endpoint,
732            ))))
733        };
734        match value {
735            RouteOperation::Page {
736                html_endpoint,
737                data_endpoint,
738            } => NapiRoute {
739                pathname,
740                r#type: "page",
741                html_endpoint: convert_endpoint(html_endpoint),
742                data_endpoint: convert_endpoint(data_endpoint),
743                ..Default::default()
744            },
745            RouteOperation::PageApi { endpoint } => NapiRoute {
746                pathname,
747                r#type: "page-api",
748                endpoint: convert_endpoint(endpoint),
749                ..Default::default()
750            },
751            RouteOperation::AppPage(pages) => NapiRoute {
752                pathname,
753                r#type: "app-page",
754                pages: Some(
755                    pages
756                        .into_iter()
757                        .map(|page_route| AppPageNapiRoute {
758                            original_name: Some(page_route.original_name),
759                            html_endpoint: convert_endpoint(page_route.html_endpoint),
760                            rsc_endpoint: convert_endpoint(page_route.rsc_endpoint),
761                        })
762                        .collect(),
763                ),
764                ..Default::default()
765            },
766            RouteOperation::AppRoute {
767                original_name,
768                endpoint,
769            } => NapiRoute {
770                pathname,
771                original_name: Some(original_name),
772                r#type: "app-route",
773                endpoint: convert_endpoint(endpoint),
774                ..Default::default()
775            },
776            RouteOperation::Conflict => NapiRoute {
777                pathname,
778                r#type: "conflict",
779                ..Default::default()
780            },
781        }
782    }
783}
784
785#[napi(object)]
786pub struct NapiMiddleware {
787    pub endpoint: External<ExternalEndpoint>,
788    pub is_proxy: bool,
789}
790
791impl NapiMiddleware {
792    fn from_middleware(
793        value: &MiddlewareOperation,
794        turbopack_ctx: &NextTurbopackContext,
795    ) -> Result<Self> {
796        Ok(NapiMiddleware {
797            endpoint: External::new(ExternalEndpoint(DetachedVc::new(
798                turbopack_ctx.clone(),
799                value.endpoint,
800            ))),
801            is_proxy: value.is_proxy,
802        })
803    }
804}
805
806#[napi(object)]
807pub struct NapiInstrumentation {
808    pub node_js: External<ExternalEndpoint>,
809    pub edge: External<ExternalEndpoint>,
810}
811
812impl NapiInstrumentation {
813    fn from_instrumentation(
814        value: &InstrumentationOperation,
815        turbopack_ctx: &NextTurbopackContext,
816    ) -> Result<Self> {
817        Ok(NapiInstrumentation {
818            node_js: External::new(ExternalEndpoint(DetachedVc::new(
819                turbopack_ctx.clone(),
820                value.node_js,
821            ))),
822            edge: External::new(ExternalEndpoint(DetachedVc::new(
823                turbopack_ctx.clone(),
824                value.edge,
825            ))),
826        })
827    }
828}
829
830#[napi(object)]
831pub struct NapiEntrypoints {
832    pub routes: Vec<NapiRoute>,
833    pub middleware: Option<NapiMiddleware>,
834    pub instrumentation: Option<NapiInstrumentation>,
835    pub pages_document_endpoint: External<ExternalEndpoint>,
836    pub pages_app_endpoint: External<ExternalEndpoint>,
837    pub pages_error_endpoint: External<ExternalEndpoint>,
838}
839
840impl NapiEntrypoints {
841    fn from_entrypoints_op(
842        entrypoints: &EntrypointsOperation,
843        turbopack_ctx: &NextTurbopackContext,
844    ) -> Result<Self> {
845        let routes = entrypoints
846            .routes
847            .iter()
848            .map(|(k, v)| NapiRoute::from_route(k.to_string(), v.clone(), turbopack_ctx))
849            .collect();
850        let middleware = entrypoints
851            .middleware
852            .as_ref()
853            .map(|m| NapiMiddleware::from_middleware(m, turbopack_ctx))
854            .transpose()?;
855        let instrumentation = entrypoints
856            .instrumentation
857            .as_ref()
858            .map(|i| NapiInstrumentation::from_instrumentation(i, turbopack_ctx))
859            .transpose()?;
860        let pages_document_endpoint = External::new(ExternalEndpoint(DetachedVc::new(
861            turbopack_ctx.clone(),
862            entrypoints.pages_document_endpoint,
863        )));
864        let pages_app_endpoint = External::new(ExternalEndpoint(DetachedVc::new(
865            turbopack_ctx.clone(),
866            entrypoints.pages_app_endpoint,
867        )));
868        let pages_error_endpoint = External::new(ExternalEndpoint(DetachedVc::new(
869            turbopack_ctx.clone(),
870            entrypoints.pages_error_endpoint,
871        )));
872        Ok(NapiEntrypoints {
873            routes,
874            middleware,
875            instrumentation,
876            pages_document_endpoint,
877            pages_app_endpoint,
878            pages_error_endpoint,
879        })
880    }
881}
882
883#[turbo_tasks::value(serialization = "none")]
884struct EntrypointsWithIssues {
885    entrypoints: Option<ReadRef<EntrypointsOperation>>,
886    issues: Arc<Vec<ReadRef<PlainIssue>>>,
887    diagnostics: Arc<Vec<ReadRef<PlainDiagnostic>>>,
888    effects: Arc<Effects>,
889}
890
891#[turbo_tasks::function(operation)]
892async fn get_entrypoints_with_issues_operation(
893    container: ResolvedVc<ProjectContainer>,
894) -> Result<Vc<EntrypointsWithIssues>> {
895    let entrypoints_operation =
896        EntrypointsOperation::new(project_container_entrypoints_operation(container));
897    let (entrypoints, issues, diagnostics, effects) =
898        strongly_consistent_catch_collectables(entrypoints_operation).await?;
899    Ok(EntrypointsWithIssues {
900        entrypoints,
901        issues,
902        diagnostics,
903        effects,
904    }
905    .cell())
906}
907
908#[turbo_tasks::function(operation)]
909fn project_container_entrypoints_operation(
910    // the container is a long-lived object with internally mutable state, there's no risk of it
911    // becoming stale
912    container: ResolvedVc<ProjectContainer>,
913) -> Vc<Entrypoints> {
914    container.entrypoints()
915}
916
917#[turbo_tasks::value(serialization = "none")]
918struct AllWrittenEntrypointsWithIssues {
919    entrypoints: Option<ReadRef<EntrypointsOperation>>,
920    issues: Arc<Vec<ReadRef<PlainIssue>>>,
921    diagnostics: Arc<Vec<ReadRef<PlainDiagnostic>>>,
922    effects: Arc<Effects>,
923}
924
925#[tracing::instrument(level = "info", name = "write all entrypoints to disk", skip_all)]
926#[napi]
927pub async fn project_write_all_entrypoints_to_disk(
928    #[napi(ts_arg_type = "{ __napiType: \"Project\" }")] project: External<ProjectInstance>,
929    app_dir_only: bool,
930) -> napi::Result<TurbopackResult<Option<NapiEntrypoints>>> {
931    let ctx = &project.turbopack_ctx;
932    let container = project.container;
933    let tt = ctx.turbo_tasks();
934
935    let (entrypoints, issues, diags) = tt
936        .run(async move {
937            let entrypoints_with_issues_op =
938                get_all_written_entrypoints_with_issues_operation(container, app_dir_only);
939
940            // Read and compile the files
941            let AllWrittenEntrypointsWithIssues {
942                entrypoints,
943                issues,
944                diagnostics,
945                effects,
946            } = &*entrypoints_with_issues_op
947                .read_strongly_consistent()
948                .await?;
949
950            // Write the files to disk
951            effects.apply().await?;
952
953            Ok((entrypoints.clone(), issues.clone(), diagnostics.clone()))
954        })
955        .or_else(|e| ctx.throw_turbopack_internal_result(&e.into()))
956        .await?;
957
958    Ok(TurbopackResult {
959        result: if let Some(entrypoints) = entrypoints {
960            Some(NapiEntrypoints::from_entrypoints_op(
961                &entrypoints,
962                &project.turbopack_ctx,
963            )?)
964        } else {
965            None
966        },
967        issues: issues.iter().map(|i| NapiIssue::from(&**i)).collect(),
968        diagnostics: diags.iter().map(|d| NapiDiagnostic::from(d)).collect(),
969    })
970}
971
972#[turbo_tasks::function(operation)]
973async fn get_all_written_entrypoints_with_issues_operation(
974    container: ResolvedVc<ProjectContainer>,
975    app_dir_only: bool,
976) -> Result<Vc<AllWrittenEntrypointsWithIssues>> {
977    let entrypoints_operation = EntrypointsOperation::new(all_entrypoints_write_to_disk_operation(
978        container,
979        app_dir_only,
980    ));
981    let (entrypoints, issues, diagnostics, effects) =
982        strongly_consistent_catch_collectables(entrypoints_operation).await?;
983    Ok(AllWrittenEntrypointsWithIssues {
984        entrypoints,
985        issues,
986        diagnostics,
987        effects,
988    }
989    .cell())
990}
991
992#[turbo_tasks::function(operation)]
993pub async fn all_entrypoints_write_to_disk_operation(
994    project: ResolvedVc<ProjectContainer>,
995    app_dir_only: bool,
996) -> Result<Vc<Entrypoints>> {
997    project
998        .project()
999        .emit_all_output_assets(output_assets_operation(project, app_dir_only))
1000        .as_side_effect()
1001        .await?;
1002
1003    Ok(project.entrypoints())
1004}
1005
1006#[turbo_tasks::function(operation)]
1007async fn output_assets_operation(
1008    container: ResolvedVc<ProjectContainer>,
1009    app_dir_only: bool,
1010) -> Result<Vc<OutputAssets>> {
1011    let endpoint_assets = container
1012        .project()
1013        .get_all_endpoints(app_dir_only)
1014        .await?
1015        .iter()
1016        .map(|endpoint| async move { endpoint.output().await?.output_assets.await })
1017        .try_join()
1018        .await?;
1019
1020    let output_assets: FxIndexSet<ResolvedVc<Box<dyn OutputAsset>>> = endpoint_assets
1021        .iter()
1022        .flat_map(|assets| assets.iter().copied())
1023        .collect();
1024
1025    let nft = next_server_nft_assets(container.project()).await?;
1026
1027    Ok(Vc::cell(
1028        output_assets
1029            .into_iter()
1030            .chain(nft.iter().copied())
1031            .collect(),
1032    ))
1033}
1034
1035#[tracing::instrument(level = "info", name = "get entrypoints", skip_all)]
1036#[napi(ts_return_type = "{ __napiType: \"RootTask\" }")]
1037pub fn project_entrypoints_subscribe(
1038    #[napi(ts_arg_type = "{ __napiType: \"Project\" }")] project: External<ProjectInstance>,
1039    func: JsFunction,
1040) -> napi::Result<External<RootTask>> {
1041    let turbopack_ctx = project.turbopack_ctx.clone();
1042    let container = project.container;
1043    subscribe(
1044        turbopack_ctx.clone(),
1045        func,
1046        move || {
1047            async move {
1048                let entrypoints_with_issues_op = get_entrypoints_with_issues_operation(container);
1049                let EntrypointsWithIssues {
1050                    entrypoints,
1051                    issues,
1052                    diagnostics,
1053                    effects,
1054                } = &*entrypoints_with_issues_op
1055                    .read_strongly_consistent()
1056                    .await?;
1057
1058                effects.apply().await?;
1059                Ok((entrypoints.clone(), issues.clone(), diagnostics.clone()))
1060            }
1061            .instrument(tracing::info_span!("entrypoints subscription"))
1062        },
1063        move |ctx| {
1064            let (entrypoints, issues, diags) = ctx.value;
1065            let result = match entrypoints {
1066                Some(entrypoints) => Some(NapiEntrypoints::from_entrypoints_op(
1067                    &entrypoints,
1068                    &turbopack_ctx,
1069                )?),
1070                None => None,
1071            };
1072
1073            Ok(vec![TurbopackResult {
1074                result,
1075                issues: issues
1076                    .iter()
1077                    .map(|issue| NapiIssue::from(&**issue))
1078                    .collect(),
1079                diagnostics: diags.iter().map(|d| NapiDiagnostic::from(d)).collect(),
1080            }])
1081        },
1082    )
1083}
1084
1085#[turbo_tasks::value(serialization = "none")]
1086struct HmrUpdateWithIssues {
1087    update: ReadRef<Update>,
1088    issues: Arc<Vec<ReadRef<PlainIssue>>>,
1089    diagnostics: Arc<Vec<ReadRef<PlainDiagnostic>>>,
1090    effects: Arc<Effects>,
1091}
1092
1093#[turbo_tasks::function(operation)]
1094async fn hmr_update_with_issues_operation(
1095    project: ResolvedVc<Project>,
1096    identifier: RcStr,
1097    state: ResolvedVc<VersionState>,
1098) -> Result<Vc<HmrUpdateWithIssues>> {
1099    let update_op = project_hmr_update_operation(project, identifier, state);
1100    let update = update_op.read_strongly_consistent().await?;
1101    let issues = get_issues(update_op).await?;
1102    let diagnostics = get_diagnostics(update_op).await?;
1103    let effects = Arc::new(get_effects(update_op).await?);
1104    Ok(HmrUpdateWithIssues {
1105        update,
1106        issues,
1107        diagnostics,
1108        effects,
1109    }
1110    .cell())
1111}
1112
1113#[turbo_tasks::function(operation)]
1114fn project_hmr_update_operation(
1115    project: ResolvedVc<Project>,
1116    identifier: RcStr,
1117    state: ResolvedVc<VersionState>,
1118) -> Vc<Update> {
1119    project.hmr_update(identifier, *state)
1120}
1121
1122#[tracing::instrument(level = "info", name = "get HMR events", skip(project, func))]
1123#[napi(ts_return_type = "{ __napiType: \"RootTask\" }")]
1124pub fn project_hmr_events(
1125    #[napi(ts_arg_type = "{ __napiType: \"Project\" }")] project: External<ProjectInstance>,
1126    identifier: RcStr,
1127    func: JsFunction,
1128) -> napi::Result<External<RootTask>> {
1129    let container = project.container;
1130    let session = TransientInstance::new(());
1131    subscribe(
1132        project.turbopack_ctx.clone(),
1133        func,
1134        {
1135            let outer_identifier = identifier.clone();
1136            let session = session.clone();
1137            move || {
1138                let identifier: RcStr = outer_identifier.clone();
1139                let session = session.clone();
1140                async move {
1141                    let project = container.project().to_resolved().await?;
1142                    let state = project
1143                        .hmr_version_state(identifier.clone(), session)
1144                        .to_resolved()
1145                        .await?;
1146
1147                    let update_op =
1148                        hmr_update_with_issues_operation(project, identifier.clone(), state);
1149                    let update = update_op.read_strongly_consistent().await?;
1150                    let HmrUpdateWithIssues {
1151                        update,
1152                        issues,
1153                        diagnostics,
1154                        effects,
1155                    } = &*update;
1156                    effects.apply().await?;
1157                    match &**update {
1158                        Update::Missing | Update::None => {}
1159                        Update::Total(TotalUpdate { to }) => {
1160                            state.set(to.clone()).await?;
1161                        }
1162                        Update::Partial(PartialUpdate { to, .. }) => {
1163                            state.set(to.clone()).await?;
1164                        }
1165                    }
1166                    Ok((Some(update.clone()), issues.clone(), diagnostics.clone()))
1167                }
1168            }
1169        },
1170        move |ctx| {
1171            let (update, issues, diags) = ctx.value;
1172
1173            let napi_issues = issues
1174                .iter()
1175                .map(|issue| NapiIssue::from(&**issue))
1176                .collect();
1177            let update_issues = issues
1178                .iter()
1179                .map(|issue| Issue::from(&**issue))
1180                .collect::<Vec<_>>();
1181
1182            let identifier = ResourceIdentifier {
1183                path: identifier.clone(),
1184                headers: None,
1185            };
1186            let update = match update.as_deref() {
1187                None | Some(Update::Missing) | Some(Update::Total(_)) => {
1188                    ClientUpdateInstruction::restart(&identifier, &update_issues)
1189                }
1190                Some(Update::Partial(update)) => ClientUpdateInstruction::partial(
1191                    &identifier,
1192                    &update.instruction,
1193                    &update_issues,
1194                ),
1195                Some(Update::None) => ClientUpdateInstruction::issues(&identifier, &update_issues),
1196            };
1197
1198            Ok(vec![TurbopackResult {
1199                result: ctx.env.to_js_value(&update)?,
1200                issues: napi_issues,
1201                diagnostics: diags.iter().map(|d| NapiDiagnostic::from(d)).collect(),
1202            }])
1203        },
1204    )
1205}
1206
1207#[napi(object)]
1208struct HmrIdentifiers {
1209    pub identifiers: Vec<RcStr>,
1210}
1211
1212#[turbo_tasks::value(serialization = "none")]
1213struct HmrIdentifiersWithIssues {
1214    identifiers: ReadRef<Vec<RcStr>>,
1215    issues: Arc<Vec<ReadRef<PlainIssue>>>,
1216    diagnostics: Arc<Vec<ReadRef<PlainDiagnostic>>>,
1217    effects: Arc<Effects>,
1218}
1219
1220#[turbo_tasks::function(operation)]
1221async fn get_hmr_identifiers_with_issues_operation(
1222    container: ResolvedVc<ProjectContainer>,
1223) -> Result<Vc<HmrIdentifiersWithIssues>> {
1224    let hmr_identifiers_op = project_container_hmr_identifiers_operation(container);
1225    let hmr_identifiers = hmr_identifiers_op.read_strongly_consistent().await?;
1226    let issues = get_issues(hmr_identifiers_op).await?;
1227    let diagnostics = get_diagnostics(hmr_identifiers_op).await?;
1228    let effects = Arc::new(get_effects(hmr_identifiers_op).await?);
1229    Ok(HmrIdentifiersWithIssues {
1230        identifiers: hmr_identifiers,
1231        issues,
1232        diagnostics,
1233        effects,
1234    }
1235    .cell())
1236}
1237
1238#[turbo_tasks::function(operation)]
1239fn project_container_hmr_identifiers_operation(
1240    container: ResolvedVc<ProjectContainer>,
1241) -> Vc<Vec<RcStr>> {
1242    container.hmr_identifiers()
1243}
1244
1245#[tracing::instrument(level = "info", name = "get HMR identifiers", skip_all)]
1246#[napi(ts_return_type = "{ __napiType: \"RootTask\" }")]
1247pub fn project_hmr_identifiers_subscribe(
1248    #[napi(ts_arg_type = "{ __napiType: \"Project\" }")] project: External<ProjectInstance>,
1249    func: JsFunction,
1250) -> napi::Result<External<RootTask>> {
1251    let container = project.container;
1252    subscribe(
1253        project.turbopack_ctx.clone(),
1254        func,
1255        move || async move {
1256            let hmr_identifiers_with_issues_op =
1257                get_hmr_identifiers_with_issues_operation(container);
1258            let HmrIdentifiersWithIssues {
1259                identifiers,
1260                issues,
1261                diagnostics,
1262                effects,
1263            } = &*hmr_identifiers_with_issues_op
1264                .read_strongly_consistent()
1265                .await?;
1266            effects.apply().await?;
1267
1268            Ok((identifiers.clone(), issues.clone(), diagnostics.clone()))
1269        },
1270        move |ctx| {
1271            let (identifiers, issues, diagnostics) = ctx.value;
1272
1273            Ok(vec![TurbopackResult {
1274                result: HmrIdentifiers {
1275                    identifiers: ReadRef::into_owned(identifiers),
1276                },
1277                issues: issues
1278                    .iter()
1279                    .map(|issue| NapiIssue::from(&**issue))
1280                    .collect(),
1281                diagnostics: diagnostics
1282                    .iter()
1283                    .map(|d| NapiDiagnostic::from(d))
1284                    .collect(),
1285            }])
1286        },
1287    )
1288}
1289
1290pub enum UpdateMessage {
1291    Start,
1292    End(UpdateInfo),
1293}
1294
1295#[napi(object)]
1296struct NapiUpdateMessage {
1297    pub update_type: &'static str,
1298    pub value: Option<NapiUpdateInfo>,
1299}
1300
1301impl From<UpdateMessage> for NapiUpdateMessage {
1302    fn from(update_message: UpdateMessage) -> Self {
1303        match update_message {
1304            UpdateMessage::Start => NapiUpdateMessage {
1305                update_type: "start",
1306                value: None,
1307            },
1308            UpdateMessage::End(info) => NapiUpdateMessage {
1309                update_type: "end",
1310                value: Some(info.into()),
1311            },
1312        }
1313    }
1314}
1315
1316#[napi(object)]
1317struct NapiUpdateInfo {
1318    pub duration: u32,
1319    pub tasks: u32,
1320}
1321
1322impl From<UpdateInfo> for NapiUpdateInfo {
1323    fn from(update_info: UpdateInfo) -> Self {
1324        Self {
1325            duration: update_info.duration.as_millis() as u32,
1326            tasks: update_info.tasks as u32,
1327        }
1328    }
1329}
1330
1331/// Subscribes to lifecycle events of the compilation.
1332///
1333/// Emits an [UpdateMessage::Start] event when any computation starts.
1334/// Emits an [UpdateMessage::End] event when there was no computation for the
1335/// specified time (`aggregation_ms`). The [UpdateMessage::End] event contains
1336/// information about the computations that happened since the
1337/// [UpdateMessage::Start] event. It contains the duration of the computation
1338/// (excluding the idle time that was spend waiting for `aggregation_ms`), and
1339/// the number of tasks that were executed.
1340///
1341/// The signature of the `func` is `(update_message: UpdateMessage) => void`.
1342#[napi]
1343pub fn project_update_info_subscribe(
1344    #[napi(ts_arg_type = "{ __napiType: \"Project\" }")] project: External<ProjectInstance>,
1345    aggregation_ms: u32,
1346    func: JsFunction,
1347) -> napi::Result<()> {
1348    let func: ThreadsafeFunction<UpdateMessage> = func.create_threadsafe_function(0, |ctx| {
1349        let message = ctx.value;
1350        Ok(vec![NapiUpdateMessage::from(message)])
1351    })?;
1352    tokio::spawn(async move {
1353        let tt = project.turbopack_ctx.turbo_tasks();
1354        loop {
1355            let update_info = tt
1356                .aggregated_update_info(Duration::ZERO, Duration::ZERO)
1357                .await;
1358
1359            func.call(
1360                Ok(UpdateMessage::Start),
1361                ThreadsafeFunctionCallMode::NonBlocking,
1362            );
1363
1364            let update_info = match update_info {
1365                Some(update_info) => update_info,
1366                None => {
1367                    tt.get_or_wait_aggregated_update_info(Duration::from_millis(
1368                        aggregation_ms.into(),
1369                    ))
1370                    .await
1371                }
1372            };
1373
1374            let status = func.call(
1375                Ok(UpdateMessage::End(update_info)),
1376                ThreadsafeFunctionCallMode::NonBlocking,
1377            );
1378
1379            if !matches!(status, Status::Ok) {
1380                let error = anyhow!("Error calling JS function: {}", status);
1381                eprintln!("{error}");
1382                break;
1383            }
1384        }
1385    });
1386    Ok(())
1387}
1388
1389/// Subscribes to all compilation events that are not cached like timing and progress information.
1390#[napi]
1391pub fn project_compilation_events_subscribe(
1392    #[napi(ts_arg_type = "{ __napiType: \"Project\" }")] project: External<ProjectInstance>,
1393    func: JsFunction,
1394    event_types: Option<Vec<String>>,
1395) -> napi::Result<()> {
1396    let tsfn: ThreadsafeFunction<Arc<dyn CompilationEvent>> =
1397        func.create_threadsafe_function(0, |ctx| {
1398            let event: Arc<dyn CompilationEvent> = ctx.value;
1399
1400            let env = ctx.env;
1401            let mut obj = env.create_object()?;
1402            obj.set_named_property("typeName", event.type_name())?;
1403            obj.set_named_property("severity", event.severity().to_string())?;
1404            obj.set_named_property("message", event.message())?;
1405
1406            let external = env.create_external(event, None);
1407            obj.set_named_property("eventData", external)?;
1408
1409            Ok(vec![obj])
1410        })?;
1411
1412    tokio::spawn(async move {
1413        let tt = project.turbopack_ctx.turbo_tasks();
1414        let mut receiver = tt.subscribe_to_compilation_events(event_types);
1415        while let Some(msg) = receiver.recv().await {
1416            let status = tsfn.call(Ok(msg), ThreadsafeFunctionCallMode::Blocking);
1417
1418            if status != Status::Ok {
1419                break;
1420            }
1421        }
1422    });
1423
1424    Ok(())
1425}
1426
1427#[napi(object)]
1428#[derive(
1429    Clone,
1430    Debug,
1431    Deserialize,
1432    Eq,
1433    Hash,
1434    NonLocalValue,
1435    OperationValue,
1436    PartialEq,
1437    Serialize,
1438    TaskInput,
1439    TraceRawVcs,
1440)]
1441pub struct StackFrame {
1442    pub is_server: bool,
1443    pub is_internal: Option<bool>,
1444    pub original_file: Option<RcStr>,
1445    pub file: RcStr,
1446    /// 1-indexed, unlike source map tokens
1447    pub line: Option<u32>,
1448    /// 1-indexed, unlike source map tokens
1449    pub column: Option<u32>,
1450    pub method_name: Option<RcStr>,
1451}
1452
1453#[turbo_tasks::value(transparent)]
1454#[derive(Clone)]
1455pub struct OptionStackFrame(Option<StackFrame>);
1456
#[turbo_tasks::function]
pub async fn get_source_map_rope(
    container: Vc<ProjectContainer>,
    source_url: RcStr,
) -> Result<Vc<OptionStringifiedSourceMap>> {
    let (file_path_sys, module) = match Url::parse(&source_url) {
        Ok(url) => match url.scheme() {
            "file" => {
                let path = match url.to_file_path() {
                    Ok(path) => path.to_string_lossy().into(),
                    Err(_) => {
                        bail!("Failed to convert file URL to file path: {url}");
                    }
                };
                let module = url.query_pairs().find(|(k, _)| k == "id");
                (
                    path,
                    match module {
                        Some(module) => Some(urlencoding::decode(&module.1)?.into_owned().into()),
                        None => None,
                    },
                )
            }
            _ => bail!("Unknown url scheme '{}'", url.scheme()),
        },
        Err(_) => (source_url.to_string(), None),
    };

    let chunk_base_unix =
        match file_path_sys.strip_prefix(container.project().dist_dir_absolute().await?.as_str()) {
            Some(relative_path) => sys_to_unix(relative_path),
            None => {
                // File doesn't exist within the dist dir
                return Ok(OptionStringifiedSourceMap::none());
            }
        };

    let server_path = container
        .project()
        .node_root()
        .await?
        .join(&chunk_base_unix)?;

    let client_path = container
        .project()
        .client_relative_path()
        .await?
        .join(&chunk_base_unix)?;

    let mut map = container.get_source_map(server_path, module.clone());

    if map.await?.is_none() {
        // If the chunk doesn't exist as a server chunk, try a client chunk.
        // TODO: Properly tag all server chunks and use the `isServer` query param.
        // Currently, this is inaccurate as it does not cover RSC server
        // chunks.
        map = container.get_source_map(client_path, module);
        if map.await?.is_none() {
            bail!("chunk/module '{}' is missing a sourcemap", source_url);
        }
    }

    Ok(map)
}

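/// Operation wrapper around [`get_source_map_rope`] so callers outside of turbo-tasks can read
/// the result strongly consistently (see [`project_get_source_map`]).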
#[turbo_tasks::function(operation)]
pub fn get_source_map_rope_operation(
    container: ResolvedVc<ProjectContainer>,
    file_path: RcStr,
) -> Vc<OptionStringifiedSourceMap> {
    get_source_map_rope(*container, file_path)
}

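/// Traces a stack frame in generated output back to its original source using the chunk's source
/// map.
///
/// Returns `None` when no source map, line, or original file can be resolved for the frame. Line
/// and column numbers are converted between 1-indexed stack frames and 0-indexed source map
/// tokens. Project files are returned relative to `current_directory_file_url`, while other
/// `turbopack:///` sources (e.g. `[turbopack]` internals) are marked as internal.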
#[turbo_tasks::function(operation)]
pub async fn project_trace_source_operation(
    container: ResolvedVc<ProjectContainer>,
    frame: StackFrame,
    current_directory_file_url: RcStr,
) -> Result<Vc<OptionStackFrame>> {
    let Some(map) =
        &*SourceMap::new_from_rope_cached(get_source_map_rope(*container, frame.file)).await?
    else {
        return Ok(Vc::cell(None));
    };

    let Some(line) = frame.line else {
        return Ok(Vc::cell(None));
    };

    let token = map.lookup_token(
        line.saturating_sub(1),
        frame.column.unwrap_or(1).saturating_sub(1),
    );

    let (original_file, line, column, method_name) = match token {
        Token::Original(token) => (
            match urlencoding::decode(&token.original_file)? {
                Cow::Borrowed(_) => token.original_file,
                Cow::Owned(original_file) => RcStr::from(original_file),
            },
            // JS stack frames are 1-indexed, source map tokens are 0-indexed
            Some(token.original_line + 1),
            Some(token.original_column + 1),
            token.name,
        ),
        Token::Synthetic(token) => {
            let Some(original_file) = token.guessed_original_file else {
                return Ok(Vc::cell(None));
            };
            (original_file, None, None, None)
        }
    };

    let project_root_uri =
        uri_from_file(container.project().project_root_path().owned().await?, None).await? + "/";
    let (file, original_file, is_internal) =
        if let Some(source_file) = original_file.strip_prefix(&project_root_uri) {
            // Client code uses file://
            (
                RcStr::from(
                    get_relative_path_to(&current_directory_file_url, &original_file)
                        // TODO(sokra): stop trimming this so the path keeps its leading "./"
                        // and stays an explicit relative path
                        .trim_start_matches("./"),
                ),
                Some(RcStr::from(source_file)),
                false,
            )
        } else if let Some(source_file) = original_file.strip_prefix(&*SOURCE_MAP_PREFIX_PROJECT) {
            // Server code uses turbopack:///[project]
            // TODO should this also be file://?
            (
                RcStr::from(
                    get_relative_path_to(
                        &current_directory_file_url,
                        &format!("{project_root_uri}{source_file}"),
                    )
                    // TODO(sokra): stop trimming this so the path keeps its leading "./"
                    // and stays an explicit relative path
                    .trim_start_matches("./"),
                ),
                Some(RcStr::from(source_file)),
                false,
            )
        } else if let Some(source_file) = original_file.strip_prefix(&*SOURCE_MAP_PREFIX) {
            // All other code like turbopack:///[turbopack] is internal code
            // TODO(veil): Should the protocol be preserved?
            (RcStr::from(source_file), None, true)
        } else {
            bail!(
                "Original file ({}) outside project ({})",
                original_file,
                project_root_uri
            )
        };

    Ok(Vc::cell(Some(StackFrame {
        file,
        original_file,
        method_name,
        line,
        column,
        is_server: frame.is_server,
        is_internal: Some(is_internal),
    })))
}

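/// N-API entry point that resolves a [`StackFrame`] against the project's source maps; see
/// [`project_trace_source_operation`] for the tracing rules.
///
/// `current_directory_file_url` is the `file://` URL of the directory that returned paths are
/// made relative to.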
#[tracing::instrument(level = "info", name = "apply SourceMap to stack frame", skip_all)]
#[napi]
pub async fn project_trace_source(
    #[napi(ts_arg_type = "{ __napiType: \"Project\" }")] project: External<ProjectInstance>,
    frame: StackFrame,
    current_directory_file_url: String,
) -> napi::Result<Option<StackFrame>> {
    let container = project.container;
    let ctx = &project.turbopack_ctx;
    ctx.turbo_tasks()
        .run(async move {
            let traced_frame = project_trace_source_operation(
                container,
                frame,
                RcStr::from(current_directory_file_url),
            )
            .read_strongly_consistent()
            .await?;
            Ok(ReadRef::into_owned(traced_frame))
        })
        // HACK: Don't use `TurbopackInternalError`: this function is race-condition prone (the
        // source files may have changed or been deleted), so these probably aren't internal errors?
        // Ideally we should differentiate.
        .await
        .map_err(|e| napi::Error::from_reason(PrettyPrintError(&e.into()).to_string()))
}

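/// Reads the content of `file_path`, relative to the root of the project filesystem, and returns
/// it as a string. Errors if the file cannot be found or is not readable as text.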
#[tracing::instrument(level = "info", name = "get source content for asset", skip_all)]
#[napi]
pub async fn project_get_source_for_asset(
    #[napi(ts_arg_type = "{ __napiType: \"Project\" }")] project: External<ProjectInstance>,
    file_path: RcStr,
) -> napi::Result<Option<String>> {
    let container = project.container;
    let ctx = &project.turbopack_ctx;
    ctx.turbo_tasks()
        .run(async move {
            let source_content = &*container
                .project()
                .project_path()
                .await?
                .fs()
                .root()
                .await?
                .join(&file_path)?
                .read()
                .await?;

            let FileContent::Content(source_content) = source_content else {
                bail!("Cannot find source for asset {}", file_path);
            };

            Ok(Some(source_content.content().to_str()?.into_owned()))
        })
        // HACK: Don't use `TurbopackInternalError`: this function is race-condition prone (the
        // source files may have changed or been deleted), so these probably aren't internal errors?
        // Ideally we should differentiate.
        .await
        .map_err(|e| napi::Error::from_reason(PrettyPrintError(&e.into()).to_string()))
}

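/// Returns the stringified source map for the given generated file, or `None` if the file is not
/// inside the project's dist directory. See [`get_source_map_rope`] for how `file_path` is
/// interpreted.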
#[tracing::instrument(level = "info", name = "get SourceMap for asset", skip_all)]
#[napi]
pub async fn project_get_source_map(
    #[napi(ts_arg_type = "{ __napiType: \"Project\" }")] project: External<ProjectInstance>,
    file_path: RcStr,
) -> napi::Result<Option<String>> {
    let container = project.container;
    let ctx = &project.turbopack_ctx;
    ctx.turbo_tasks()
        .run(async move {
            let Some(map) = &*get_source_map_rope_operation(container, file_path)
                .read_strongly_consistent()
                .await?
            else {
                return Ok(None);
            };
            Ok(Some(map.to_str()?.to_string()))
        })
        // HACK: Don't use `TurbopackInternalError`: this function is race-condition prone (the
        // source files may have changed or been deleted), so these probably aren't internal errors?
        // Ideally we should differentiate.
        .await
        .map_err(|e| napi::Error::from_reason(PrettyPrintError(&e.into()).to_string()))
}

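/// Synchronous variant of [`project_get_source_map`] for callers that cannot await; it blocks on
/// the current tokio runtime to perform the same lookup.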
#[napi]
pub fn project_get_source_map_sync(
    #[napi(ts_arg_type = "{ __napiType: \"Project\" }")] project: External<ProjectInstance>,
    file_path: RcStr,
) -> napi::Result<Option<String>> {
    within_runtime_if_available(|| {
        tokio::runtime::Handle::current().block_on(project_get_source_map(project, file_path))
    })
}