next_swc_napi/next_api/project.rs

1use std::{borrow::Cow, io::Write, path::PathBuf, sync::Arc, thread, time::Duration};
2
3use anyhow::{Context, Result, anyhow, bail};
4use bincode::{Decode, Encode};
5use flate2::write::GzEncoder;
6use futures_util::TryFutureExt;
7use napi::{
8    Env, JsFunction, JsObject, Status,
9    bindgen_prelude::{External, within_runtime_if_available},
10    threadsafe_function::{ThreadsafeFunction, ThreadsafeFunctionCallMode},
11};
12use napi_derive::napi;
13use next_api::{
14    entrypoints::Entrypoints,
15    next_server_nft::next_server_nft_assets,
16    operation::{
17        EntrypointsOperation, InstrumentationOperation, MiddlewareOperation, OptionEndpoint,
18        RouteOperation,
19    },
20    project::{
21        DebugBuildPaths, DefineEnv, DraftModeOptions, PartialProjectOptions, Project,
22        ProjectContainer, ProjectOptions, WatchOptions,
23    },
24    route::Endpoint,
25    routes_hashes_manifest::routes_hashes_manifest_asset_if_enabled,
26};
27use next_core::tracing_presets::{
28    TRACING_NEXT_OVERVIEW_TARGETS, TRACING_NEXT_TARGETS, TRACING_NEXT_TURBO_TASKS_TARGETS,
29    TRACING_NEXT_TURBOPACK_TARGETS,
30};
31use once_cell::sync::Lazy;
32use rand::Rng;
33use serde::Serialize;
34use tokio::{io::AsyncWriteExt, runtime::Handle, time::Instant};
35use tracing::Instrument;
36use tracing_subscriber::{Registry, layer::SubscriberExt, util::SubscriberInitExt};
37use turbo_rcstr::{RcStr, rcstr};
38use turbo_tasks::{
39    Effects, FxIndexSet, NonLocalValue, OperationValue, OperationVc, ReadRef, ResolvedVc,
40    TaskInput, TransientInstance, TryJoinIterExt, TurboTasksApi, UpdateInfo, Vc, get_effects,
41    message_queue::{CompilationEvent, Severity},
42    trace::TraceRawVcs,
43};
44use turbo_tasks_backend::{BackingStorage, db_invalidation::invalidation_reasons};
45use turbo_tasks_fs::{
46    DiskFileSystem, FileContent, FileSystem, FileSystemPath, util::uri_from_file,
47};
48use turbo_unix_path::{get_relative_path_to, sys_to_unix};
49use turbopack_core::{
50    PROJECT_FILESYSTEM_NAME, SOURCE_URL_PROTOCOL,
51    diagnostics::PlainDiagnostic,
52    error::PrettyPrintError,
53    issue::{IssueFilter, PlainIssue},
54    output::{OutputAsset, OutputAssets},
55    source_map::{SourceMap, Token},
56    version::{PartialUpdate, TotalUpdate, Update, VersionState},
57};
58use turbopack_ecmascript_hmr_protocol::{ClientUpdateInstruction, Issue, ResourceIdentifier};
59use turbopack_trace_utils::{
60    exit::{ExitHandler, ExitReceiver},
61    filter_layer::FilterLayer,
62    raw_trace::RawTraceLayer,
63    trace_writer::TraceWriter,
64};
65use url::Url;
66
67use crate::{
68    next_api::{
69        analyze::{WriteAnalyzeResult, write_analyze_data_with_issues_operation},
70        endpoint::ExternalEndpoint,
71        turbopack_ctx::{
72            NapiNextTurbopackCallbacks, NapiNextTurbopackCallbacksJsObject, NextTurboTasks,
73            NextTurbopackContext, create_turbo_tasks,
74        },
75        utils::{
76            DetachedVc, NapiDiagnostic, NapiIssue, RootTask, TurbopackResult, get_diagnostics,
77            get_issues, strongly_consistent_catch_collectables, subscribe,
78        },
79    },
80    util::DhatProfilerGuard,
81};
82
83/// Used by [`benchmark_file_io`]. This is a noisy benchmark, so set the
84/// threshold high.
85const SLOW_FILESYSTEM_THRESHOLD: Duration = Duration::from_millis(100);
86static SOURCE_MAP_PREFIX: Lazy<String> = Lazy::new(|| format!("{SOURCE_URL_PROTOCOL}///"));
87static SOURCE_MAP_PREFIX_PROJECT: Lazy<String> =
88    Lazy::new(|| format!("{SOURCE_URL_PROTOCOL}///[{PROJECT_FILESYSTEM_NAME}]/"));
89
90/// Next doesn't display warnings from node_modules, so configure turbopack to not report them
91/// either. This matches logic in `packages/next/src/server/dev/turbopack-utils.ts`
92pub const NEXT_ISSUE_FILTER: IssueFilter = IssueFilter::warnings_and_foreign_errors();
93
94#[napi(object)]
95#[derive(Clone, Debug)]
96pub struct NapiEnvVar {
97    pub name: RcStr,
98    pub value: RcStr,
99}
100
101#[napi(object)]
102#[derive(Clone, Debug)]
103pub struct NapiOptionEnvVar {
104    pub name: RcStr,
105    pub value: Option<RcStr>,
106}
107
108#[napi(object)]
109pub struct NapiDraftModeOptions {
110    pub preview_mode_id: RcStr,
111    pub preview_mode_encryption_key: RcStr,
112    pub preview_mode_signing_key: RcStr,
113}
114
115impl From<NapiDraftModeOptions> for DraftModeOptions {
116    fn from(val: NapiDraftModeOptions) -> Self {
117        DraftModeOptions {
118            preview_mode_id: val.preview_mode_id,
119            preview_mode_encryption_key: val.preview_mode_encryption_key,
120            preview_mode_signing_key: val.preview_mode_signing_key,
121        }
122    }
123}
124
125#[napi(object)]
126pub struct NapiWatchOptions {
127    /// Whether to watch the filesystem for file changes.
128    pub enable: bool,
129
130    /// Enable polling at a certain interval if the native file watching doesn't work (e.g.
131    /// docker).
132    pub poll_interval_ms: Option<f64>,
133}
134
135#[napi(object)]
136pub struct NapiProjectOptions {
    /// An absolute root path (Unix or Windows path) under which all files must be nested.
138    /// Trying to access a file outside this root will fail, so think of this as a chroot.
139    /// E.g. `/home/user/projects/my-repo`.
140    pub root_path: RcStr,
141
    /// A path which contains the app/pages directories, relative to [`Project::root_path`]; always
    /// a Unix path. E.g. `apps/my-app`
144    pub project_path: RcStr,
145
    /// A path where tracing output will be written to and/or the cache is read/written.
147    /// Usually equal to the `distDir` in next.config.js.
148    /// E.g. `.next`
149    pub dist_dir: RcStr,
150
151    /// Filesystem watcher options.
152    pub watch: NapiWatchOptions,
153
154    /// The contents of next.config.js, serialized to JSON.
155    pub next_config: RcStr,
156
157    /// A map of environment variables to use when compiling code.
158    pub env: Vec<NapiEnvVar>,
159
160    /// A map of environment variables which should get injected at compile
161    /// time.
162    pub define_env: NapiDefineEnv,
163
    /// Whether Next.js is running in development mode.
165    pub dev: bool,
166
167    /// The server actions encryption key.
168    pub encryption_key: RcStr,
169
170    /// The build id.
171    pub build_id: RcStr,
172
173    /// Options for draft mode.
174    pub preview_props: NapiDraftModeOptions,
175
176    /// The browserslist query to use for targeting browsers.
177    pub browserslist_query: RcStr,
178
179    /// When the code is minified, this opts out of the default mangling of
180    /// local names for variables, functions etc., which can be useful for
181    /// debugging/profiling purposes.
182    pub no_mangling: bool,
183
184    /// Whether to write the route hashes manifest.
185    pub write_routes_hashes_manifest: bool,
186
187    /// The version of Node.js that is available/currently running.
188    pub current_node_js_version: RcStr,
189
190    /// Debug build paths for selective builds.
191    /// When set, only routes matching these paths will be included in the build.
192    pub debug_build_paths: Option<NapiDebugBuildPaths>,
193}
194
195/// [NapiProjectOptions] with all fields optional.
196#[napi(object)]
197pub struct NapiPartialProjectOptions {
    /// An absolute root path (Unix or Windows path) under which all files must be nested.
199    /// Trying to access a file outside this root will fail, so think of this as a chroot.
200    /// E.g. `/home/user/projects/my-repo`.
201    pub root_path: Option<RcStr>,
202
203    /// A path which contains the app/pages directories, relative to [`Project::root_path`], always
204    /// a Unix path.
205    /// E.g. `apps/my-app`
206    pub project_path: Option<RcStr>,
207
208    /// Filesystem watcher options.
209    pub watch: Option<NapiWatchOptions>,
210
211    /// The contents of next.config.js, serialized to JSON.
212    pub next_config: Option<RcStr>,
213
214    /// A map of environment variables to use when compiling code.
215    pub env: Option<Vec<NapiEnvVar>>,
216
217    /// A map of environment variables which should get injected at compile
218    /// time.
219    pub define_env: Option<NapiDefineEnv>,
220
    /// Whether Next.js is running in development mode.
222    pub dev: Option<bool>,
223
224    /// The server actions encryption key.
225    pub encryption_key: Option<RcStr>,
226
227    /// The build id.
228    pub build_id: Option<RcStr>,
229
230    /// Options for draft mode.
231    pub preview_props: Option<NapiDraftModeOptions>,
232
233    /// The browserslist query to use for targeting browsers.
234    pub browserslist_query: Option<RcStr>,
235
236    /// Whether to write the route hashes manifest.
237    pub write_routes_hashes_manifest: Option<bool>,
238
239    /// When the code is minified, this opts out of the default mangling of
240    /// local names for variables, functions etc., which can be useful for
241    /// debugging/profiling purposes.
242    pub no_mangling: Option<bool>,
243}
244
245#[napi(object)]
246#[derive(Clone, Debug)]
247pub struct NapiDefineEnv {
248    pub client: Vec<NapiOptionEnvVar>,
249    pub edge: Vec<NapiOptionEnvVar>,
250    pub nodejs: Vec<NapiOptionEnvVar>,
251}
252
253#[napi(object)]
254pub struct NapiTurboEngineOptions {
255    /// Use the new backend with filesystem cache enabled.
256    pub persistent_caching: Option<bool>,
257    /// An upper bound of memory that turbopack will attempt to stay under.
258    pub memory_limit: Option<f64>,
259    /// Track dependencies between tasks. If false, any change during build will error.
260    pub dependency_tracking: Option<bool>,
261    /// Whether the project is running in a CI environment.
262    pub is_ci: Option<bool>,
263    /// Whether the project is running in a short session.
264    pub is_short_session: Option<bool>,
265}
266
267impl From<NapiWatchOptions> for WatchOptions {
268    fn from(val: NapiWatchOptions) -> Self {
269        WatchOptions {
270            enable: val.enable,
271            poll_interval: val
272                .poll_interval_ms
273                .filter(|interval| !interval.is_nan() && interval.is_finite() && *interval > 0.0)
274                .map(|interval| Duration::from_secs_f64(interval / 1000.0)),
275        }
276    }
277}
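
// A minimal sketch of the normalization above (illustrative test, not exhaustive; module and test
// names are made up): NaN, non-finite, and non-positive poll intervals are dropped, while valid
// intervals become a `Duration`.
#[cfg(test)]
mod napi_watch_options_tests {
    use super::*;

    #[test]
    fn poll_interval_ms_is_filtered_and_converted() {
        // Helper: run the conversion above and return the resulting poll interval.
        let poll_interval = |poll_interval_ms| {
            WatchOptions::from(NapiWatchOptions {
                enable: true,
                poll_interval_ms,
            })
            .poll_interval
        };
        assert_eq!(poll_interval(Some(500.0)), Some(Duration::from_millis(500)));
        assert_eq!(poll_interval(Some(0.0)), None);
        assert_eq!(poll_interval(Some(-1.0)), None);
        assert_eq!(poll_interval(Some(f64::NAN)), None);
        assert_eq!(poll_interval(None), None);
    }
}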
278
279impl From<NapiProjectOptions> for ProjectOptions {
280    fn from(val: NapiProjectOptions) -> Self {
281        let NapiProjectOptions {
282            root_path,
283            project_path,
284            // Only used for initializing cache and tracing
285            dist_dir: _,
286            watch,
287            next_config,
288            env,
289            define_env,
290            dev,
291            encryption_key,
292            build_id,
293            preview_props,
294            browserslist_query,
295            no_mangling,
296            write_routes_hashes_manifest,
297            current_node_js_version,
298            debug_build_paths,
299        } = val;
300        ProjectOptions {
301            root_path,
302            project_path,
303            watch: watch.into(),
304            next_config,
305            env: env.into_iter().map(|var| (var.name, var.value)).collect(),
306            define_env: define_env.into(),
307            dev,
308            encryption_key,
309            build_id,
310            preview_props: preview_props.into(),
311            browserslist_query,
312            no_mangling,
313            write_routes_hashes_manifest,
314            current_node_js_version,
315            debug_build_paths: debug_build_paths.map(|p| DebugBuildPaths {
316                app: p.app,
317                pages: p.pages,
318            }),
319        }
320    }
321}
322
323impl From<NapiPartialProjectOptions> for PartialProjectOptions {
324    fn from(val: NapiPartialProjectOptions) -> Self {
325        let NapiPartialProjectOptions {
326            root_path,
327            project_path,
328            watch,
329            next_config,
330            env,
331            define_env,
332            dev,
333            encryption_key,
334            build_id,
335            preview_props,
336            browserslist_query,
337            no_mangling,
338            write_routes_hashes_manifest,
339        } = val;
340        PartialProjectOptions {
341            root_path,
342            project_path,
343            watch: watch.map(From::from),
344            next_config,
345            env: env.map(|env| env.into_iter().map(|var| (var.name, var.value)).collect()),
346            define_env: define_env.map(|env| env.into()),
347            dev,
348            encryption_key,
349            build_id,
350            preview_props: preview_props.map(|props| props.into()),
351            browserslist_query,
352            no_mangling,
353            write_routes_hashes_manifest,
354            debug_build_paths: None,
355        }
356    }
357}
358
359impl From<NapiDefineEnv> for DefineEnv {
360    fn from(val: NapiDefineEnv) -> Self {
361        DefineEnv {
362            client: val
363                .client
364                .into_iter()
365                .map(|var| (var.name, var.value))
366                .collect(),
367            edge: val
368                .edge
369                .into_iter()
370                .map(|var| (var.name, var.value))
371                .collect(),
372            nodejs: val
373                .nodejs
374                .into_iter()
375                .map(|var| (var.name, var.value))
376                .collect(),
377        }
378    }
379}
380
381pub struct ProjectInstance {
382    turbopack_ctx: NextTurbopackContext,
383    container: ResolvedVc<ProjectContainer>,
384    exit_receiver: tokio::sync::Mutex<Option<ExitReceiver>>,
385}
386
387#[napi(ts_return_type = "Promise<{ __napiType: \"Project\" }>")]
388pub fn project_new(
389    env: Env,
390    options: NapiProjectOptions,
391    turbo_engine_options: NapiTurboEngineOptions,
392    napi_callbacks: NapiNextTurbopackCallbacksJsObject,
393) -> napi::Result<JsObject> {
394    let napi_callbacks = NapiNextTurbopackCallbacks::from_js(napi_callbacks)?;
395    let (exit, exit_receiver) = ExitHandler::new_receiver();
396
397    if let Some(dhat_profiler) = DhatProfilerGuard::try_init() {
398        exit.on_exit(async move {
399            tokio::task::spawn_blocking(move || drop(dhat_profiler))
400                .await
401                .unwrap()
402        });
403    }
404
405    let mut trace = std::env::var("NEXT_TURBOPACK_TRACING")
406        .ok()
407        .filter(|v| !v.is_empty());
408
409    if cfg!(feature = "tokio-console") && trace.is_none() {
410        // ensure `trace` is set to *something* so that the `tokio-console` feature works,
411        // otherwise you just get empty output from `tokio-console`, which can be
412        // confusing.
413        trace = Some("overview".to_owned());
414    }
415
416    enum Compression {
417        None,
418        GzipFast,
419        GzipBest,
420    }
421    let mut compress = Compression::None;
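    // `trace` is a comma-separated list of tracing targets. Besides raw targets, the match below
    // accepts the preset names "overview"/"1", "next", "turbopack" and "turbo-tasks", plus the
    // output flags "gz"/"gz-best", which only select the trace-file compression. Illustrative
    // (hypothetical) invocation: `NEXT_TURBOPACK_TRACING=next,gz next build`.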
422    if let Some(mut trace) = trace {
423        let internal_dir = PathBuf::from(&options.root_path)
424            .join(&options.project_path)
425            .join(&options.dist_dir);
426        let trace_file = internal_dir.join("trace-turbopack");
427
428        println!("Turbopack tracing enabled with targets: {trace}");
429        println!("  Note that this might have a small performance impact.");
430        println!("  Trace output will be written to {}", trace_file.display());
431
432        trace = trace
433            .split(",")
434            .filter_map(|item| {
435                // Trace presets
436                Some(match item {
437                    "overview" | "1" => Cow::Owned(TRACING_NEXT_OVERVIEW_TARGETS.join(",")),
438                    "next" => Cow::Owned(TRACING_NEXT_TARGETS.join(",")),
439                    "turbopack" => Cow::Owned(TRACING_NEXT_TURBOPACK_TARGETS.join(",")),
440                    "turbo-tasks" => Cow::Owned(TRACING_NEXT_TURBO_TASKS_TARGETS.join(",")),
441                    "gz" => {
442                        compress = Compression::GzipFast;
443                        return None;
444                    }
445                    "gz-best" => {
446                        compress = Compression::GzipBest;
447                        return None;
448                    }
449                    _ => Cow::Borrowed(item),
450                })
451            })
452            .intersperse_with(|| Cow::Borrowed(","))
453            .collect::<String>();
454
455        let subscriber = Registry::default();
456
457        if cfg!(feature = "tokio-console") {
458            trace = format!("{trace},tokio=trace,runtime=trace");
459        }
460        #[cfg(feature = "tokio-console")]
461        let subscriber = subscriber.with(console_subscriber::spawn());
462
463        let subscriber = subscriber.with(FilterLayer::try_new(&trace).unwrap());
464
465        std::fs::create_dir_all(&internal_dir)
466            .context("Unable to create .next directory")
467            .unwrap();
468        let (trace_writer, trace_writer_guard) = match compress {
469            Compression::None => {
470                let trace_writer = std::fs::File::create(trace_file.clone()).unwrap();
471                TraceWriter::new(trace_writer)
472            }
473            Compression::GzipFast => {
474                let trace_writer = std::fs::File::create(trace_file.clone()).unwrap();
475                let trace_writer = GzEncoder::new(trace_writer, flate2::Compression::fast());
476                TraceWriter::new(trace_writer)
477            }
478            Compression::GzipBest => {
479                let trace_writer = std::fs::File::create(trace_file.clone()).unwrap();
480                let trace_writer = GzEncoder::new(trace_writer, flate2::Compression::best());
481                TraceWriter::new(trace_writer)
482            }
483        };
484        let subscriber = subscriber.with(RawTraceLayer::new(trace_writer));
485
486        exit.on_exit(async move {
487            tokio::task::spawn_blocking(move || drop(trace_writer_guard))
488                .await
489                .unwrap();
490        });
491
492        let trace_server = std::env::var("NEXT_TURBOPACK_TRACE_SERVER").ok();
493        if trace_server.is_some() {
494            thread::spawn(move || {
495                turbopack_trace_server::start_turbopack_trace_server(trace_file, None);
496            });
497            println!("Turbopack trace server started. View trace at https://trace.nextjs.org");
498        }
499
500        subscriber.init();
501    }
502
503    env.spawn_future(
504        async move {
505            let memory_limit = turbo_engine_options
506                .memory_limit
507                .map(|m| m as usize)
508                .unwrap_or(usize::MAX);
509            let persistent_caching = turbo_engine_options.persistent_caching.unwrap_or_default();
510            let dependency_tracking = turbo_engine_options.dependency_tracking.unwrap_or(true);
511            let is_ci = turbo_engine_options.is_ci.unwrap_or(false);
512            let is_short_session = turbo_engine_options.is_short_session.unwrap_or(false);
513            let turbo_tasks = create_turbo_tasks(
514                PathBuf::from(&options.dist_dir),
515                persistent_caching,
516                memory_limit,
517                dependency_tracking,
518                is_ci,
519                is_short_session,
520            )?;
521            let turbopack_ctx = NextTurbopackContext::new(turbo_tasks.clone(), napi_callbacks);
522
523            if let Some(stats_path) = std::env::var_os("NEXT_TURBOPACK_TASK_STATISTICS") {
524                let task_stats = turbo_tasks.task_statistics().enable().clone();
525                exit.on_exit(async move {
526                    tokio::task::spawn_blocking(move || {
527                        let mut file = std::fs::File::create(&stats_path)
528                            .with_context(|| format!("failed to create or open {stats_path:?}"))?;
529                        serde_json::to_writer(&file, &task_stats)
530                            .context("failed to serialize or write task statistics")?;
531                        file.flush().context("failed to flush file")
532                    })
533                    .await
534                    .unwrap()
535                    .unwrap();
536                });
537            }
538
539            let options: ProjectOptions = options.into();
540            let is_dev = options.dev;
541            let container = turbo_tasks
542                .run(async move {
543                    let project = ProjectContainer::new(rcstr!("next.js"), is_dev);
544                    let project = project.to_resolved().await?;
545                    project.initialize(options).await?;
546                    Ok(project)
547                })
548                .or_else(|e| turbopack_ctx.throw_turbopack_internal_result(&e.into()))
549                .await?;
550
551            if is_dev {
552                Handle::current().spawn({
553                    let tt = turbo_tasks.clone();
554                    async move {
555                        let result = tt
556                            .clone()
557                            .run(async move {
558                                benchmark_file_io(
559                                    tt,
560                                    container.project().node_root().owned().await?,
561                                )
562                                .await
563                            })
564                            .await;
565                        if let Err(err) = result {
566                            // TODO Not ideal to print directly to stdout.
567                            // We should use a compilation event instead to report async errors.
568                            println!("Failed to benchmark file I/O: {err}");
569                        }
570                    }
571                    .instrument(tracing::info_span!("benchmark file I/O"))
572                });
573            }
574
575            Ok(External::new(ProjectInstance {
576                turbopack_ctx,
577                container,
578                exit_receiver: tokio::sync::Mutex::new(Some(exit_receiver)),
579            }))
580        }
581        .instrument(tracing::info_span!("create project")),
582    )
583}
584
585#[derive(Debug, Clone, Serialize)]
586struct SlowFilesystemEvent {
587    directory: String,
588    duration_ms: u128,
589}
590
591impl CompilationEvent for SlowFilesystemEvent {
592    fn type_name(&self) -> &'static str {
593        "SlowFilesystemEvent"
594    }
595
596    fn severity(&self) -> Severity {
597        Severity::Warning
598    }
599
600    fn message(&self) -> String {
601        format!(
602            "Slow filesystem detected. The benchmark took {}ms. If {} is a network drive, \
603             consider moving it to a local folder. If you have an antivirus enabled, consider \
604             excluding your project directory.",
605            self.duration_ms, self.directory
606        )
607    }
608
609    fn to_json(&self) -> String {
610        serde_json::to_string(self).unwrap()
611    }
612}
613
614/// A very simple and low-overhead, but potentially noisy benchmark to detect
/// very slow disk IO. Warns the user (via a `SlowFilesystemEvent` compilation event) if the
/// benchmark takes more than `SLOW_FILESYSTEM_THRESHOLD`.
617///
618/// This idea is copied from Bun:
619/// - https://x.com/jarredsumner/status/1637549427677364224
620/// - https://github.com/oven-sh/bun/blob/06a9aa80c38b08b3148bfeabe560/src/install/install.zig#L3038
621async fn benchmark_file_io(turbo_tasks: NextTurboTasks, directory: FileSystemPath) -> Result<()> {
622    // try to get the real file path on disk so that we can use it with tokio
623    let fs = ResolvedVc::try_downcast_type::<DiskFileSystem>(directory.fs)
624        .context(anyhow!(
625            "expected node_root to be a DiskFileSystem, cannot benchmark"
626        ))?
627        .await?;
628
629    let directory = fs.to_sys_path(&directory);
630    let temp_path = directory.join(format!(
631        "tmp_file_io_benchmark_{:x}",
632        rand::random::<u128>()
633    ));
634
635    let mut random_buffer = [0u8; 512];
636    rand::rng().fill(&mut random_buffer[..]);
637
    // perform IO directly with tokio (skipping `turbo_tasks_fs`) to avoid the
639    // additional noise/overhead of tasks caching, invalidation, file locks,
640    // etc.
641    let start = Instant::now();
642    async {
643        for _ in 0..3 {
644            // create a new empty file
645            let mut file = tokio::fs::File::create(&temp_path).await?;
646            file.write_all(&random_buffer).await?;
647            file.sync_all().await?;
648            drop(file);
649
650            // remove the file
651            tokio::fs::remove_file(&temp_path).await?;
652        }
653        anyhow::Ok(())
654    }
655    .instrument(tracing::info_span!("benchmark file IO (measurement)", path = %temp_path.display()))
656    .await?;
657
658    let duration = Instant::now().duration_since(start);
659    if duration > SLOW_FILESYSTEM_THRESHOLD {
660        turbo_tasks.send_compilation_event(Arc::new(SlowFilesystemEvent {
661            directory: directory.to_string_lossy().into(),
662            duration_ms: duration.as_millis(),
663        }));
664    }
665
666    Ok(())
667}
668
669#[tracing::instrument(level = "info", name = "update project", skip_all)]
670#[napi]
671pub async fn project_update(
672    #[napi(ts_arg_type = "{ __napiType: \"Project\" }")] project: External<ProjectInstance>,
673    options: NapiPartialProjectOptions,
674) -> napi::Result<()> {
675    let ctx = &project.turbopack_ctx;
676    let options = options.into();
677    let container = project.container;
678    ctx.turbo_tasks()
679        .run(async move {
680            container.update(options).await?;
681            Ok(())
682        })
683        .or_else(|e| ctx.throw_turbopack_internal_result(&e.into()))
684        .await
685}
686
/// Invalidates the filesystem cache so that it will be deleted the next time a turbopack project
/// is created with the filesystem cache enabled.
689#[napi]
690pub async fn project_invalidate_file_system_cache(
691    #[napi(ts_arg_type = "{ __napiType: \"Project\" }")] project: External<ProjectInstance>,
692) -> napi::Result<()> {
693    tokio::task::spawn_blocking(move || {
694        // TODO: Let the JS caller specify a reason? We need to limit the reasons to ones we know
695        // how to generate a message for on the Rust side of the FFI.
696        project
697            .turbopack_ctx
698            .turbo_tasks()
699            .backend()
700            .backing_storage()
701            .invalidate(invalidation_reasons::USER_REQUEST)
702    })
703    .await
704    .context("panicked while invalidating filesystem cache")??;
705    Ok(())
706}
707
708/// Runs exit handlers for the project registered using the [`ExitHandler`] API.
709///
710/// This is called by `project_shutdown`, so if you're calling that API, you shouldn't call this
711/// one.
712#[napi]
713pub async fn project_on_exit(
714    #[napi(ts_arg_type = "{ __napiType: \"Project\" }")] project: External<ProjectInstance>,
715) {
716    project_on_exit_internal(&project).await
717}
718
719async fn project_on_exit_internal(project: &ProjectInstance) {
720    let exit_receiver = project.exit_receiver.lock().await.take();
721    exit_receiver
722        .expect("`project.onExitSync` must only be called once")
723        .run_exit_handler()
724        .await;
725}
726
/// Waits for turbo_tasks to gracefully shut down, and then runs `project_on_exit`.
728///
729/// This is used in builds where it's important that we completely persist turbo-tasks to disk, but
730/// it's skipped in the development server (`project_on_exit` is used instead with a short timeout),
731/// where we prioritize fast exit and user responsiveness over all else.
732#[tracing::instrument(level = "info", name = "shutdown project", skip_all)]
733#[napi]
734pub async fn project_shutdown(
735    #[napi(ts_arg_type = "{ __napiType: \"Project\" }")] project: External<ProjectInstance>,
736) {
737    project.turbopack_ctx.turbo_tasks().stop_and_wait().await;
738    project_on_exit_internal(&project).await;
739}
740
741#[napi(object)]
742#[derive(Default)]
743pub struct AppPageNapiRoute {
744    /// The relative path from project_path to the route file
745    pub original_name: Option<RcStr>,
746
747    pub html_endpoint: Option<External<ExternalEndpoint>>,
748    pub rsc_endpoint: Option<External<ExternalEndpoint>>,
749}
750
751#[napi(object)]
752#[derive(Default)]
753pub struct NapiRoute {
754    /// The router path
755    pub pathname: String,
756    /// The relative path from project_path to the route file
757    pub original_name: Option<RcStr>,
758
    /// The type of route, e.g. a Page or App
760    pub r#type: &'static str,
761
762    pub pages: Option<Vec<AppPageNapiRoute>>,
763
764    // Different representations of the endpoint
765    pub endpoint: Option<External<ExternalEndpoint>>,
766    pub html_endpoint: Option<External<ExternalEndpoint>>,
767    pub rsc_endpoint: Option<External<ExternalEndpoint>>,
768    pub data_endpoint: Option<External<ExternalEndpoint>>,
769}
770
771impl NapiRoute {
772    fn from_route(
773        pathname: String,
774        value: RouteOperation,
775        turbopack_ctx: &NextTurbopackContext,
776    ) -> Self {
777        let convert_endpoint = |endpoint: OperationVc<OptionEndpoint>| {
778            Some(External::new(ExternalEndpoint(DetachedVc::new(
779                turbopack_ctx.clone(),
780                endpoint,
781            ))))
782        };
783        match value {
784            RouteOperation::Page {
785                html_endpoint,
786                data_endpoint,
787            } => NapiRoute {
788                pathname,
789                r#type: "page",
790                html_endpoint: convert_endpoint(html_endpoint),
791                data_endpoint: convert_endpoint(data_endpoint),
792                ..Default::default()
793            },
794            RouteOperation::PageApi { endpoint } => NapiRoute {
795                pathname,
796                r#type: "page-api",
797                endpoint: convert_endpoint(endpoint),
798                ..Default::default()
799            },
800            RouteOperation::AppPage(pages) => NapiRoute {
801                pathname,
802                r#type: "app-page",
803                pages: Some(
804                    pages
805                        .into_iter()
806                        .map(|page_route| AppPageNapiRoute {
807                            original_name: Some(page_route.original_name),
808                            html_endpoint: convert_endpoint(page_route.html_endpoint),
809                            rsc_endpoint: convert_endpoint(page_route.rsc_endpoint),
810                        })
811                        .collect(),
812                ),
813                ..Default::default()
814            },
815            RouteOperation::AppRoute {
816                original_name,
817                endpoint,
818            } => NapiRoute {
819                pathname,
820                original_name: Some(original_name),
821                r#type: "app-route",
822                endpoint: convert_endpoint(endpoint),
823                ..Default::default()
824            },
825            RouteOperation::Conflict => NapiRoute {
826                pathname,
827                r#type: "conflict",
828                ..Default::default()
829            },
830        }
831    }
832}
833
834#[napi(object)]
835pub struct NapiMiddleware {
836    pub endpoint: External<ExternalEndpoint>,
837    pub is_proxy: bool,
838}
839
840impl NapiMiddleware {
841    fn from_middleware(
842        value: &MiddlewareOperation,
843        turbopack_ctx: &NextTurbopackContext,
844    ) -> Result<Self> {
845        Ok(NapiMiddleware {
846            endpoint: External::new(ExternalEndpoint(DetachedVc::new(
847                turbopack_ctx.clone(),
848                value.endpoint,
849            ))),
850            is_proxy: value.is_proxy,
851        })
852    }
853}
854
855#[napi(object)]
856pub struct NapiInstrumentation {
857    pub node_js: External<ExternalEndpoint>,
858    pub edge: External<ExternalEndpoint>,
859}
860
861impl NapiInstrumentation {
862    fn from_instrumentation(
863        value: &InstrumentationOperation,
864        turbopack_ctx: &NextTurbopackContext,
865    ) -> Result<Self> {
866        Ok(NapiInstrumentation {
867            node_js: External::new(ExternalEndpoint(DetachedVc::new(
868                turbopack_ctx.clone(),
869                value.node_js,
870            ))),
871            edge: External::new(ExternalEndpoint(DetachedVc::new(
872                turbopack_ctx.clone(),
873                value.edge,
874            ))),
875        })
876    }
877}
878
879#[napi(object)]
880pub struct NapiEntrypoints {
881    pub routes: Vec<NapiRoute>,
882    pub middleware: Option<NapiMiddleware>,
883    pub instrumentation: Option<NapiInstrumentation>,
884    pub pages_document_endpoint: External<ExternalEndpoint>,
885    pub pages_app_endpoint: External<ExternalEndpoint>,
886    pub pages_error_endpoint: External<ExternalEndpoint>,
887}
888
889impl NapiEntrypoints {
890    fn from_entrypoints_op(
891        entrypoints: &EntrypointsOperation,
892        turbopack_ctx: &NextTurbopackContext,
893    ) -> Result<Self> {
894        let routes = entrypoints
895            .routes
896            .iter()
897            .map(|(k, v)| NapiRoute::from_route(k.to_string(), v.clone(), turbopack_ctx))
898            .collect();
899        let middleware = entrypoints
900            .middleware
901            .as_ref()
902            .map(|m| NapiMiddleware::from_middleware(m, turbopack_ctx))
903            .transpose()?;
904        let instrumentation = entrypoints
905            .instrumentation
906            .as_ref()
907            .map(|i| NapiInstrumentation::from_instrumentation(i, turbopack_ctx))
908            .transpose()?;
909        let pages_document_endpoint = External::new(ExternalEndpoint(DetachedVc::new(
910            turbopack_ctx.clone(),
911            entrypoints.pages_document_endpoint,
912        )));
913        let pages_app_endpoint = External::new(ExternalEndpoint(DetachedVc::new(
914            turbopack_ctx.clone(),
915            entrypoints.pages_app_endpoint,
916        )));
917        let pages_error_endpoint = External::new(ExternalEndpoint(DetachedVc::new(
918            turbopack_ctx.clone(),
919            entrypoints.pages_error_endpoint,
920        )));
921        Ok(NapiEntrypoints {
922            routes,
923            middleware,
924            instrumentation,
925            pages_document_endpoint,
926            pages_app_endpoint,
927            pages_error_endpoint,
928        })
929    }
930}
931
932#[turbo_tasks::value(serialization = "none")]
933struct EntrypointsWithIssues {
934    entrypoints: Option<ReadRef<EntrypointsOperation>>,
935    issues: Arc<Vec<ReadRef<PlainIssue>>>,
936    diagnostics: Arc<Vec<ReadRef<PlainDiagnostic>>>,
937    effects: Arc<Effects>,
938}
939
940#[turbo_tasks::function(operation)]
941async fn get_entrypoints_with_issues_operation(
942    container: ResolvedVc<ProjectContainer>,
943) -> Result<Vc<EntrypointsWithIssues>> {
944    let entrypoints_operation =
945        EntrypointsOperation::new(project_container_entrypoints_operation(container));
946    let (entrypoints, issues, diagnostics, effects) =
947        strongly_consistent_catch_collectables(entrypoints_operation).await?;
948    Ok(EntrypointsWithIssues {
949        entrypoints,
950        issues,
951        diagnostics,
952        effects,
953    }
954    .cell())
955}
956
957#[turbo_tasks::function(operation)]
958fn project_container_entrypoints_operation(
    // the container is a long-lived object with internally mutable state, so there's no risk of it
960    // becoming stale
961    container: ResolvedVc<ProjectContainer>,
962) -> Vc<Entrypoints> {
963    container.entrypoints()
964}
965
966#[turbo_tasks::value(serialization = "none")]
967struct OperationResult {
968    issues: Arc<Vec<ReadRef<PlainIssue>>>,
969    diagnostics: Arc<Vec<ReadRef<PlainDiagnostic>>>,
970    effects: Arc<Effects>,
971}
972
973#[turbo_tasks::value(serialization = "none")]
974struct AllWrittenEntrypointsWithIssues {
975    entrypoints: Option<ReadRef<EntrypointsOperation>>,
976    issues: Arc<Vec<ReadRef<PlainIssue>>>,
977    diagnostics: Arc<Vec<ReadRef<PlainDiagnostic>>>,
978    effects: Arc<Effects>,
979}
980
981#[napi(object)]
982#[derive(Clone, Debug)]
983pub struct NapiDebugBuildPaths {
984    pub app: Vec<RcStr>,
985    pub pages: Vec<RcStr>,
986}
987
988#[tracing::instrument(level = "info", name = "write all entrypoints to disk", skip_all)]
989#[napi]
990pub async fn project_write_all_entrypoints_to_disk(
991    #[napi(ts_arg_type = "{ __napiType: \"Project\" }")] project: External<ProjectInstance>,
992    app_dir_only: bool,
993) -> napi::Result<TurbopackResult<Option<NapiEntrypoints>>> {
994    let ctx = &project.turbopack_ctx;
995    let container = project.container;
996    let tt = ctx.turbo_tasks();
997
998    let (entrypoints, issues, diags) = tt
999        .run(async move {
1000            let entrypoints_with_issues_op =
1001                get_all_written_entrypoints_with_issues_operation(container, app_dir_only);
1002
1003            // Read and compile the files
1004            let AllWrittenEntrypointsWithIssues {
1005                entrypoints,
1006                issues,
1007                diagnostics,
1008                effects,
1009            } = &*entrypoints_with_issues_op
1010                .read_strongly_consistent()
1011                .await?;
1012
1013            // Write the files to disk
1014            effects.apply().await?;
1015
1016            Ok((entrypoints.clone(), issues.clone(), diagnostics.clone()))
1017        })
1018        .or_else(|e| ctx.throw_turbopack_internal_result(&e.into()))
1019        .await?;
1020
1021    Ok(TurbopackResult {
1022        result: if let Some(entrypoints) = entrypoints {
1023            Some(NapiEntrypoints::from_entrypoints_op(
1024                &entrypoints,
1025                &project.turbopack_ctx,
1026            )?)
1027        } else {
1028            None
1029        },
1030        issues: issues.iter().map(|i| NapiIssue::from(&**i)).collect(),
1031        diagnostics: diags.iter().map(|d| NapiDiagnostic::from(d)).collect(),
1032    })
1033}
1034
1035#[turbo_tasks::function(operation)]
1036async fn get_all_written_entrypoints_with_issues_operation(
1037    container: ResolvedVc<ProjectContainer>,
1038    app_dir_only: bool,
1039) -> Result<Vc<AllWrittenEntrypointsWithIssues>> {
1040    let entrypoints_operation = EntrypointsOperation::new(all_entrypoints_write_to_disk_operation(
1041        container,
1042        app_dir_only,
1043    ));
1044    let (entrypoints, issues, diagnostics, effects) =
1045        strongly_consistent_catch_collectables(entrypoints_operation).await?;
1046    Ok(AllWrittenEntrypointsWithIssues {
1047        entrypoints,
1048        issues,
1049        diagnostics,
1050        effects,
1051    }
1052    .cell())
1053}
1054
1055#[turbo_tasks::function(operation)]
1056pub async fn all_entrypoints_write_to_disk_operation(
1057    project: ResolvedVc<ProjectContainer>,
1058    app_dir_only: bool,
1059) -> Result<Vc<Entrypoints>> {
1060    let output_assets_operation = output_assets_operation(project, app_dir_only);
1061    project
1062        .project()
1063        .emit_all_output_assets(output_assets_operation)
1064        .as_side_effect()
1065        .await?;
1066
1067    Ok(project.entrypoints())
1068}
1069
1070#[turbo_tasks::function(operation)]
1071async fn output_assets_operation(
1072    container: ResolvedVc<ProjectContainer>,
1073    app_dir_only: bool,
1074) -> Result<Vc<OutputAssets>> {
1075    let project = container.project();
1076    let whole_app_module_graphs = project.whole_app_module_graphs();
1077    let endpoint_assets = project
1078        .get_all_endpoints(app_dir_only)
1079        .await?
1080        .iter()
1081        .map(|endpoint| async move { endpoint.output().await?.output_assets.await })
1082        .try_join()
1083        .await?;
1084
1085    let output_assets: FxIndexSet<ResolvedVc<Box<dyn OutputAsset>>> = endpoint_assets
1086        .iter()
1087        .flat_map(|assets| assets.iter().copied())
1088        .collect();
1089
1090    let nft = next_server_nft_assets(project).await?;
1091
1092    let routes_hashes_manifest = routes_hashes_manifest_asset_if_enabled(project).await?;
1093
1094    whole_app_module_graphs.as_side_effect().await?;
1095
1096    Ok(Vc::cell(
1097        output_assets
1098            .into_iter()
1099            .chain(nft.iter().copied())
1100            .chain(routes_hashes_manifest.iter().copied())
1101            .collect(),
1102    ))
1103}
1104
1105#[tracing::instrument(level = "info", name = "get entrypoints", skip_all)]
1106#[napi]
1107pub async fn project_entrypoints(
1108    #[napi(ts_arg_type = "{ __napiType: \"Project\" }")] project: External<ProjectInstance>,
1109) -> napi::Result<TurbopackResult<Option<NapiEntrypoints>>> {
1110    let container = project.container;
1111
1112    let (entrypoints, issues, diags) = project
1113        .turbopack_ctx
1114        .turbo_tasks()
1115        .run_once(async move {
1116            let entrypoints_with_issues_op = get_entrypoints_with_issues_operation(container);
1117
1118            // Read and compile the files
1119            let EntrypointsWithIssues {
1120                entrypoints,
1121                issues,
1122                diagnostics,
1123                effects: _,
1124            } = &*entrypoints_with_issues_op
1125                .read_strongly_consistent()
1126                .await?;
1127
1128            Ok((entrypoints.clone(), issues.clone(), diagnostics.clone()))
1129        })
1130        .await
1131        .map_err(|e| napi::Error::from_reason(PrettyPrintError(&e).to_string()))?;
1132
1133    let result = match entrypoints {
1134        Some(entrypoints) => Some(NapiEntrypoints::from_entrypoints_op(
1135            &entrypoints,
1136            &project.turbopack_ctx,
1137        )?),
1138        None => None,
1139    };
1140
1141    Ok(TurbopackResult {
1142        result,
1143        issues: issues.iter().map(|i| NapiIssue::from(&**i)).collect(),
1144        diagnostics: diags.iter().map(|d| NapiDiagnostic::from(d)).collect(),
1145    })
1146}
1147
1148#[tracing::instrument(level = "info", name = "subscribe to entrypoints", skip_all)]
1149#[napi(ts_return_type = "{ __napiType: \"RootTask\" }")]
1150pub fn project_entrypoints_subscribe(
1151    #[napi(ts_arg_type = "{ __napiType: \"Project\" }")] project: External<ProjectInstance>,
1152    func: JsFunction,
1153) -> napi::Result<External<RootTask>> {
1154    let turbopack_ctx = project.turbopack_ctx.clone();
1155    let container = project.container;
1156    subscribe(
1157        turbopack_ctx.clone(),
1158        func,
1159        move || {
1160            async move {
1161                let entrypoints_with_issues_op = get_entrypoints_with_issues_operation(container);
1162                let EntrypointsWithIssues {
1163                    entrypoints,
1164                    issues,
1165                    diagnostics,
1166                    effects,
1167                } = &*entrypoints_with_issues_op
1168                    .read_strongly_consistent()
1169                    .await?;
1170
1171                effects.apply().await?;
1172                Ok((entrypoints.clone(), issues.clone(), diagnostics.clone()))
1173            }
1174            .instrument(tracing::info_span!("entrypoints subscription"))
1175        },
1176        move |ctx| {
1177            let (entrypoints, issues, diags) = ctx.value;
1178            let result = match entrypoints {
1179                Some(entrypoints) => Some(NapiEntrypoints::from_entrypoints_op(
1180                    &entrypoints,
1181                    &turbopack_ctx,
1182                )?),
1183                None => None,
1184            };
1185
1186            Ok(vec![TurbopackResult {
1187                result,
1188                issues: issues
1189                    .iter()
1190                    .map(|issue| NapiIssue::from(&**issue))
1191                    .collect(),
1192                diagnostics: diags.iter().map(|d| NapiDiagnostic::from(d)).collect(),
1193            }])
1194        },
1195    )
1196}
1197
1198#[turbo_tasks::value(serialization = "none")]
1199struct HmrUpdateWithIssues {
1200    update: ReadRef<Update>,
1201    issues: Arc<Vec<ReadRef<PlainIssue>>>,
1202    diagnostics: Arc<Vec<ReadRef<PlainDiagnostic>>>,
1203    effects: Arc<Effects>,
1204}
1205
1206#[turbo_tasks::function(operation)]
1207async fn hmr_update_with_issues_operation(
1208    project: ResolvedVc<Project>,
1209    identifier: RcStr,
1210    state: ResolvedVc<VersionState>,
1211) -> Result<Vc<HmrUpdateWithIssues>> {
1212    let update_op = project_hmr_update_operation(project, identifier, state);
1213    let update = update_op.read_strongly_consistent().await?;
1214    let issues = get_issues(update_op, NEXT_ISSUE_FILTER).await?;
1215    let diagnostics = get_diagnostics(update_op).await?;
1216    let effects = Arc::new(get_effects(update_op).await?);
1217    Ok(HmrUpdateWithIssues {
1218        update,
1219        issues,
1220        diagnostics,
1221        effects,
1222    }
1223    .cell())
1224}
1225
1226#[turbo_tasks::function(operation)]
1227fn project_hmr_update_operation(
1228    project: ResolvedVc<Project>,
1229    identifier: RcStr,
1230    state: ResolvedVc<VersionState>,
1231) -> Vc<Update> {
1232    project.hmr_update(identifier, *state)
1233}
1234
1235#[tracing::instrument(level = "info", name = "get HMR events", skip(project, func))]
1236#[napi(ts_return_type = "{ __napiType: \"RootTask\" }")]
1237pub fn project_hmr_events(
1238    #[napi(ts_arg_type = "{ __napiType: \"Project\" }")] project: External<ProjectInstance>,
1239    identifier: RcStr,
1240    func: JsFunction,
1241) -> napi::Result<External<RootTask>> {
1242    let container = project.container;
1243    let session = TransientInstance::new(());
1244    subscribe(
1245        project.turbopack_ctx.clone(),
1246        func,
1247        {
1248            let outer_identifier = identifier.clone();
1249            let session = session.clone();
1250            move || {
1251                let identifier: RcStr = outer_identifier.clone();
1252                let session = session.clone();
1253                async move {
1254                    let project = container.project().to_resolved().await?;
1255                    let state = project
1256                        .hmr_version_state(identifier.clone(), session)
1257                        .to_resolved()
1258                        .await?;
1259
1260                    let update_op =
1261                        hmr_update_with_issues_operation(project, identifier.clone(), state);
1262                    let update = update_op.read_strongly_consistent().await?;
1263                    let HmrUpdateWithIssues {
1264                        update,
1265                        issues,
1266                        diagnostics,
1267                        effects,
1268                    } = &*update;
1269                    effects.apply().await?;
1270                    match &**update {
1271                        Update::Missing | Update::None => {}
1272                        Update::Total(TotalUpdate { to }) => {
1273                            state.set(to.clone()).await?;
1274                        }
1275                        Update::Partial(PartialUpdate { to, .. }) => {
1276                            state.set(to.clone()).await?;
1277                        }
1278                    }
1279                    Ok((Some(update.clone()), issues.clone(), diagnostics.clone()))
1280                }
1281            }
1282        },
1283        move |ctx| {
1284            let (update, issues, diags) = ctx.value;
1285
1286            let napi_issues = issues
1287                .iter()
1288                .map(|issue| NapiIssue::from(&**issue))
1289                .collect();
1290            let update_issues = issues
1291                .iter()
1292                .map(|issue| Issue::from(&**issue))
1293                .collect::<Vec<_>>();
1294
1295            let identifier = ResourceIdentifier {
1296                path: identifier.clone(),
1297                headers: None,
1298            };
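            // Map the server-side `Update` onto the HMR protocol: missing/total updates (and a
            // missing result) ask the client to restart, partial updates are forwarded, and
            // `Update::None` still delivers the current issues.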
1299            let update = match update.as_deref() {
1300                None | Some(Update::Missing) | Some(Update::Total(_)) => {
1301                    ClientUpdateInstruction::restart(&identifier, &update_issues)
1302                }
1303                Some(Update::Partial(update)) => ClientUpdateInstruction::partial(
1304                    &identifier,
1305                    &update.instruction,
1306                    &update_issues,
1307                ),
1308                Some(Update::None) => ClientUpdateInstruction::issues(&identifier, &update_issues),
1309            };
1310
1311            Ok(vec![TurbopackResult {
1312                result: ctx.env.to_js_value(&update)?,
1313                issues: napi_issues,
1314                diagnostics: diags.iter().map(|d| NapiDiagnostic::from(d)).collect(),
1315            }])
1316        },
1317    )
1318}
1319
1320#[napi(object)]
1321struct HmrIdentifiers {
1322    pub identifiers: Vec<RcStr>,
1323}
1324
1325#[turbo_tasks::value(serialization = "none")]
1326struct HmrIdentifiersWithIssues {
1327    identifiers: ReadRef<Vec<RcStr>>,
1328    issues: Arc<Vec<ReadRef<PlainIssue>>>,
1329    diagnostics: Arc<Vec<ReadRef<PlainDiagnostic>>>,
1330    effects: Arc<Effects>,
1331}
1332
1333#[turbo_tasks::function(operation)]
1334async fn get_hmr_identifiers_with_issues_operation(
1335    container: ResolvedVc<ProjectContainer>,
1336) -> Result<Vc<HmrIdentifiersWithIssues>> {
1337    let hmr_identifiers_op = project_container_hmr_identifiers_operation(container);
1338    let hmr_identifiers = hmr_identifiers_op.read_strongly_consistent().await?;
1339    let issues = get_issues(hmr_identifiers_op, NEXT_ISSUE_FILTER).await?;
1340    let diagnostics = get_diagnostics(hmr_identifiers_op).await?;
1341    let effects = Arc::new(get_effects(hmr_identifiers_op).await?);
1342    Ok(HmrIdentifiersWithIssues {
1343        identifiers: hmr_identifiers,
1344        issues,
1345        diagnostics,
1346        effects,
1347    }
1348    .cell())
1349}
1350
1351#[turbo_tasks::function(operation)]
1352fn project_container_hmr_identifiers_operation(
1353    container: ResolvedVc<ProjectContainer>,
1354) -> Vc<Vec<RcStr>> {
1355    container.hmr_identifiers()
1356}
1357
1358#[tracing::instrument(level = "info", name = "get HMR identifiers", skip_all)]
1359#[napi(ts_return_type = "{ __napiType: \"RootTask\" }")]
1360pub fn project_hmr_identifiers_subscribe(
1361    #[napi(ts_arg_type = "{ __napiType: \"Project\" }")] project: External<ProjectInstance>,
1362    func: JsFunction,
1363) -> napi::Result<External<RootTask>> {
1364    let container = project.container;
1365    subscribe(
1366        project.turbopack_ctx.clone(),
1367        func,
1368        move || async move {
1369            let hmr_identifiers_with_issues_op =
1370                get_hmr_identifiers_with_issues_operation(container);
1371            let HmrIdentifiersWithIssues {
1372                identifiers,
1373                issues,
1374                diagnostics,
1375                effects,
1376            } = &*hmr_identifiers_with_issues_op
1377                .read_strongly_consistent()
1378                .await?;
1379            effects.apply().await?;
1380
1381            Ok((identifiers.clone(), issues.clone(), diagnostics.clone()))
1382        },
1383        move |ctx| {
1384            let (identifiers, issues, diagnostics) = ctx.value;
1385
1386            Ok(vec![TurbopackResult {
1387                result: HmrIdentifiers {
1388                    identifiers: ReadRef::into_owned(identifiers),
1389                },
1390                issues: issues
1391                    .iter()
1392                    .map(|issue| NapiIssue::from(&**issue))
1393                    .collect(),
1394                diagnostics: diagnostics
1395                    .iter()
1396                    .map(|d| NapiDiagnostic::from(d))
1397                    .collect(),
1398            }])
1399        },
1400    )
1401}
1402
1403pub enum UpdateMessage {
1404    Start,
1405    End(UpdateInfo),
1406}
1407
1408#[napi(object)]
1409struct NapiUpdateMessage {
1410    pub update_type: &'static str,
1411    pub value: Option<NapiUpdateInfo>,
1412}
1413
1414impl From<UpdateMessage> for NapiUpdateMessage {
1415    fn from(update_message: UpdateMessage) -> Self {
1416        match update_message {
1417            UpdateMessage::Start => NapiUpdateMessage {
1418                update_type: "start",
1419                value: None,
1420            },
1421            UpdateMessage::End(info) => NapiUpdateMessage {
1422                update_type: "end",
1423                value: Some(info.into()),
1424            },
1425        }
1426    }
1427}
1428
1429#[napi(object)]
1430struct NapiUpdateInfo {
1431    pub duration: u32,
1432    pub tasks: u32,
1433}
1434
1435impl From<UpdateInfo> for NapiUpdateInfo {
1436    fn from(update_info: UpdateInfo) -> Self {
1437        Self {
1438            duration: update_info.duration.as_millis() as u32,
1439            tasks: update_info.tasks as u32,
1440        }
1441    }
1442}
1443
1444/// Subscribes to lifecycle events of the compilation.
1445///
1446/// Emits an [UpdateMessage::Start] event when any computation starts.
1447/// Emits an [UpdateMessage::End] event when there was no computation for the
1448/// specified time (`aggregation_ms`). The [UpdateMessage::End] event contains
1449/// information about the computations that happened since the
1450/// [UpdateMessage::Start] event. It contains the duration of the computation
/// (excluding the idle time that was spent waiting for `aggregation_ms`), and
1452/// the number of tasks that were executed.
1453///
1454/// The signature of the `func` is `(update_message: UpdateMessage) => void`.
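///
/// # Example
///
/// An illustrative message sequence as observed by `func` (field names assume napi-rs's default
/// camelCase renaming for `#[napi(object)]` fields; the values are made up):
///
/// ```text
/// { updateType: "start" }
/// { updateType: "end", value: { duration: 42, tasks: 1337 } }
/// ```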
1455#[napi]
1456pub fn project_update_info_subscribe(
1457    #[napi(ts_arg_type = "{ __napiType: \"Project\" }")] project: External<ProjectInstance>,
1458    aggregation_ms: u32,
1459    func: JsFunction,
1460) -> napi::Result<()> {
1461    let func: ThreadsafeFunction<UpdateMessage> = func.create_threadsafe_function(0, |ctx| {
1462        let message = ctx.value;
1463        Ok(vec![NapiUpdateMessage::from(message)])
1464    })?;
1465    tokio::spawn(async move {
1466        let tt = project.turbopack_ctx.turbo_tasks();
1467        loop {
1468            let update_info = tt
1469                .aggregated_update_info(Duration::ZERO, Duration::ZERO)
1470                .await;
1471
1472            func.call(
1473                Ok(UpdateMessage::Start),
1474                ThreadsafeFunctionCallMode::NonBlocking,
1475            );
1476
1477            let update_info = match update_info {
1478                Some(update_info) => update_info,
1479                None => {
1480                    tt.get_or_wait_aggregated_update_info(Duration::from_millis(
1481                        aggregation_ms.into(),
1482                    ))
1483                    .await
1484                }
1485            };
1486
1487            let status = func.call(
1488                Ok(UpdateMessage::End(update_info)),
1489                ThreadsafeFunctionCallMode::NonBlocking,
1490            );
1491
1492            if !matches!(status, Status::Ok) {
1493                let error = anyhow!("Error calling JS function: {}", status);
1494                eprintln!("{error}");
1495                break;
1496            }
1497        }
1498    });
1499    Ok(())
1500}
1501
/// Subscribes to all compilation events that are not cached, such as timing and progress information.
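///
/// Each event is delivered to `func` as an object with `typeName`, `severity`, and `message`
/// fields, plus an opaque `eventData` external (see the threadsafe-function conversion below).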
#[napi]
pub fn project_compilation_events_subscribe(
    #[napi(ts_arg_type = "{ __napiType: \"Project\" }")] project: External<ProjectInstance>,
    func: JsFunction,
    event_types: Option<Vec<String>>,
) -> napi::Result<()> {
    let tsfn: ThreadsafeFunction<Arc<dyn CompilationEvent>> =
        func.create_threadsafe_function(0, |ctx| {
            let event: Arc<dyn CompilationEvent> = ctx.value;

            let env = ctx.env;
            let mut obj = env.create_object()?;
            obj.set_named_property("typeName", event.type_name())?;
            obj.set_named_property("severity", event.severity().to_string())?;
            obj.set_named_property("message", event.message())?;

            let external = env.create_external(event, None);
            obj.set_named_property("eventData", external)?;

            Ok(vec![obj])
        })?;

    tokio::spawn(async move {
        let tt = project.turbopack_ctx.turbo_tasks();
        let mut receiver = tt.subscribe_to_compilation_events(event_types);
        while let Some(msg) = receiver.recv().await {
            let status = tsfn.call(Ok(msg), ThreadsafeFunctionCallMode::Blocking);

            if status != Status::Ok {
                break;
            }
        }
    });

    Ok(())
}

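/// A stack frame as exchanged with the JS side; it is both the input to and the
/// result of source map tracing in [`project_trace_source`].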
#[napi(object)]
#[derive(
    Clone,
    Debug,
    Eq,
    Hash,
    NonLocalValue,
    OperationValue,
    PartialEq,
    TaskInput,
    TraceRawVcs,
    Encode,
    Decode,
)]
pub struct StackFrame {
    pub is_server: bool,
    pub is_internal: Option<bool>,
    pub original_file: Option<RcStr>,
    pub file: RcStr,
    /// 1-indexed, unlike source map tokens
    pub line: Option<u32>,
    /// 1-indexed, unlike source map tokens
    pub column: Option<u32>,
    pub method_name: Option<RcStr>,
}

#[turbo_tasks::value(transparent)]
#[derive(Clone)]
pub struct OptionStackFrame(Option<StackFrame>);

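/// Looks up the source map for a generated chunk or module.
///
/// `source_url` is either a plain filesystem path or a `file://` URL pointing into the
/// dist directory; an optional `id` query parameter selects a specific module within
/// the chunk, e.g. (illustrative)
/// `file:///<distDir>/server/chunks/page.js?id=<url-encoded module id>`.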
#[turbo_tasks::function]
pub async fn get_source_map_rope(
    container: Vc<ProjectContainer>,
    source_url: RcStr,
) -> Result<Vc<FileContent>> {
    let (file_path_sys, module) = match Url::parse(&source_url) {
        Ok(url) => match url.scheme() {
            "file" => {
                let path = match url.to_file_path() {
                    Ok(path) => path.to_string_lossy().into(),
                    Err(_) => {
                        bail!("Failed to convert file URL to file path: {url}");
                    }
                };
                let module = url.query_pairs().find(|(k, _)| k == "id");
                (
                    path,
                    match module {
                        Some(module) => Some(urlencoding::decode(&module.1)?.into_owned().into()),
                        None => None,
                    },
                )
            }
            _ => bail!("Unknown url scheme '{}'", url.scheme()),
        },
        Err(_) => (source_url.to_string(), None),
    };

    let chunk_base_unix =
        match file_path_sys.strip_prefix(container.project().dist_dir_absolute().await?.as_str()) {
            Some(relative_path) => sys_to_unix(relative_path),
            None => {
                // File doesn't exist within the dist dir
                return Ok(FileContent::NotFound.cell());
            }
        };

    let server_path = container
        .project()
        .node_root()
        .await?
        .join(&chunk_base_unix)?;

    let client_path = container
        .project()
        .client_relative_path()
        .await?
        .join(&chunk_base_unix)?;

    let mut map = container.get_source_map(server_path, module.clone());

    if !map.await?.is_content() {
        // If the chunk doesn't exist as a server chunk, try a client chunk.
        // TODO: Properly tag all server chunks and use the `isServer` query param.
        // Currently, this is inaccurate as it does not cover RSC server
        // chunks.
        map = container.get_source_map(client_path, module);
        if !map.await?.is_content() {
            bail!("chunk/module '{}' is missing a sourcemap", source_url);
        }
    }

    Ok(map)
}

#[turbo_tasks::function(operation)]
pub fn get_source_map_rope_operation(
    container: ResolvedVc<ProjectContainer>,
    file_path: RcStr,
) -> Vc<FileContent> {
    get_source_map_rope(*container, file_path)
}

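/// Maps a (1-indexed) stack frame location through the chunk's source map back to the
/// original source, classifying the result as project code (`file://` and
/// `turbopack:///[project]` sources) or as Turbopack-internal code.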
#[turbo_tasks::function(operation)]
pub async fn project_trace_source_operation(
    container: ResolvedVc<ProjectContainer>,
    frame: StackFrame,
    current_directory_file_url: RcStr,
) -> Result<Vc<OptionStackFrame>> {
    let Some(map) =
        &*SourceMap::new_from_rope_cached(get_source_map_rope(*container, frame.file)).await?
    else {
        return Ok(Vc::cell(None));
    };

    let Some(line) = frame.line else {
        return Ok(Vc::cell(None));
    };

    let token = map.lookup_token(
        line.saturating_sub(1),
        frame.column.unwrap_or(1).saturating_sub(1),
    );

    let (original_file, line, column, method_name) = match token {
        Token::Original(token) => (
            match urlencoding::decode(&token.original_file)? {
                Cow::Borrowed(_) => token.original_file,
                Cow::Owned(original_file) => RcStr::from(original_file),
            },
            // JS stack frames are 1-indexed, source map tokens are 0-indexed
            Some(token.original_line + 1),
            Some(token.original_column + 1),
            token.name,
        ),
        Token::Synthetic(token) => {
            let Some(original_file) = token.guessed_original_file else {
                return Ok(Vc::cell(None));
            };
            (original_file, None, None, None)
        }
    };

    let project_root_uri =
        uri_from_file(container.project().project_root_path().owned().await?, None).await? + "/";
    let (file, original_file, is_internal) =
        if let Some(source_file) = original_file.strip_prefix(&project_root_uri) {
            // Client code uses file://
            (
                RcStr::from(
                    get_relative_path_to(&current_directory_file_url, &original_file)
                        // TODO(sokra): remove this trim so the path keeps its `./` prefix
                        .trim_start_matches("./"),
                ),
                Some(RcStr::from(source_file)),
                false,
            )
        } else if let Some(source_file) = original_file.strip_prefix(&*SOURCE_MAP_PREFIX_PROJECT) {
            // Server code uses turbopack:///[project]
            // TODO should this also be file://?
            (
                RcStr::from(
                    get_relative_path_to(
                        &current_directory_file_url,
                        &format!("{project_root_uri}{source_file}"),
                    )
                    // TODO(sokra): remove this trim so the path keeps its `./` prefix
                    .trim_start_matches("./"),
                ),
                Some(RcStr::from(source_file)),
                false,
            )
        } else if let Some(source_file) = original_file.strip_prefix(&*SOURCE_MAP_PREFIX) {
            // All other code like turbopack:///[turbopack] is internal code
            // TODO(veil): Should the protocol be preserved?
            (RcStr::from(source_file), None, true)
        } else {
            bail!(
                "Original file ({}) outside project ({})",
                original_file,
                project_root_uri
            )
        };

    Ok(Vc::cell(Some(StackFrame {
        file,
        original_file,
        method_name,
        line,
        column,
        is_server: frame.is_server,
        is_internal: Some(is_internal),
    })))
}

#[tracing::instrument(level = "info", name = "apply SourceMap to stack frame", skip_all)]
#[napi]
pub async fn project_trace_source(
    #[napi(ts_arg_type = "{ __napiType: \"Project\" }")] project: External<ProjectInstance>,
    frame: StackFrame,
    current_directory_file_url: String,
) -> napi::Result<Option<StackFrame>> {
    let container = project.container;
    let ctx = &project.turbopack_ctx;
    ctx.turbo_tasks()
        .run(async move {
            let traced_frame = project_trace_source_operation(
                container,
                frame,
                RcStr::from(current_directory_file_url),
            )
            .read_strongly_consistent()
            .await?;
            Ok(ReadRef::into_owned(traced_frame))
        })
        // HACK: Don't use `TurbopackInternalError`, this function is race-condition prone (the
        // source files may have changed or been deleted), so these probably aren't internal errors?
        // Ideally we should differentiate.
        .await
        .map_err(|e| napi::Error::from_reason(PrettyPrintError(&e.into()).to_string()))
}

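/// Reads the original source contents for an asset, addressed by a path relative to the
/// root of the project filesystem.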
#[tracing::instrument(level = "info", name = "get source content for asset", skip_all)]
#[napi]
pub async fn project_get_source_for_asset(
    #[napi(ts_arg_type = "{ __napiType: \"Project\" }")] project: External<ProjectInstance>,
    file_path: RcStr,
) -> napi::Result<Option<String>> {
    let container = project.container;
    let ctx = &project.turbopack_ctx;
    ctx.turbo_tasks()
        .run(async move {
            let source_content = &*container
                .project()
                .project_path()
                .await?
                .fs()
                .root()
                .await?
                .join(&file_path)?
                .read()
                .await?;

            let FileContent::Content(source_content) = source_content else {
                bail!("Cannot find source for asset {}", file_path);
            };

            Ok(Some(source_content.content().to_str()?.into_owned()))
        })
        // HACK: Don't use `TurbopackInternalError`, this function is race-condition prone (the
        // source files may have changed or been deleted), so these probably aren't internal errors?
        // Ideally we should differentiate.
        .await
        .map_err(|e| napi::Error::from_reason(PrettyPrintError(&e.into()).to_string()))
}

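/// Returns the source map for the given generated file as a string, or `None` when the
/// path does not resolve to any source map content.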
#[tracing::instrument(level = "info", name = "get SourceMap for asset", skip_all)]
#[napi]
pub async fn project_get_source_map(
    #[napi(ts_arg_type = "{ __napiType: \"Project\" }")] project: External<ProjectInstance>,
    file_path: RcStr,
) -> napi::Result<Option<String>> {
    let container = project.container;
    let ctx = &project.turbopack_ctx;
    ctx.turbo_tasks()
        .run(async move {
            let source_map = get_source_map_rope_operation(container, file_path)
                .read_strongly_consistent()
                .await?;
            let Some(map) = source_map.as_content() else {
                return Ok(None);
            };
            Ok(Some(map.content().to_str()?.to_string()))
        })
        // HACK: Don't use `TurbopackInternalError`, this function is race-condition prone (the
        // source files may have changed or been deleted), so these probably aren't internal errors?
        // Ideally we should differentiate.
        .await
        .map_err(|e| napi::Error::from_reason(PrettyPrintError(&e.into()).to_string()))
}

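/// Synchronous variant of [`project_get_source_map`] that blocks the calling thread on
/// the async lookup, for callers that cannot await.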
#[napi]
pub fn project_get_source_map_sync(
    #[napi(ts_arg_type = "{ __napiType: \"Project\" }")] project: External<ProjectInstance>,
    file_path: RcStr,
) -> napi::Result<Option<String>> {
    within_runtime_if_available(|| {
        tokio::runtime::Handle::current().block_on(project_get_source_map(project, file_path))
    })
}

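/// Runs the analyze pass and writes its output to disk, returning any issues and
/// diagnostics collected along the way.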
#[napi]
pub async fn project_write_analyze_data(
    #[napi(ts_arg_type = "{ __napiType: \"Project\" }")] project: External<ProjectInstance>,
    app_dir_only: bool,
) -> napi::Result<TurbopackResult<()>> {
    let container = project.container;
    let (issues, diagnostics) = project
        .turbopack_ctx
        .turbo_tasks()
        .run_once(async move {
            let analyze_data_op = write_analyze_data_with_issues_operation(container, app_dir_only);
            let WriteAnalyzeResult {
                issues,
                diagnostics,
                effects,
            } = &*analyze_data_op.read_strongly_consistent().await?;

            // Write the files to disk
            effects.apply().await?;
            Ok((issues.clone(), diagnostics.clone()))
        })
        .await
        .map_err(|e| napi::Error::from_reason(PrettyPrintError(&e).to_string()))?;

    Ok(TurbopackResult {
        result: (),
        issues: issues.iter().map(|i| NapiIssue::from(&**i)).collect(),
        diagnostics: diagnostics.iter().map(NapiDiagnostic::from).collect(),
    })
}