diff --git a/internal/compiler/program.go b/internal/compiler/program.go
index 7c8c16b5c39..483745912ce 100644
--- a/internal/compiler/program.go
+++ b/internal/compiler/program.go
@@ -1405,9 +1405,10 @@ func (p *Program) Emit(ctx context.Context, options EmitOptions) *EmitResult {
 		}
 	}
 
+	newLine := p.Options().NewLine.GetNewLineCharacter()
 	writerPool := &sync.Pool{
 		New: func() any {
-			return printer.NewTextWriter(p.Options().NewLine.GetNewLineCharacter())
+			return printer.NewTextWriter(newLine)
 		},
 	}
 	wg := core.NewWorkGroup(p.SingleThreaded())
diff --git a/internal/diagnostics/diagnostics_generated.go b/internal/diagnostics/diagnostics_generated.go
index 26789f741c9..607b77b67ae 100644
--- a/internal/diagnostics/diagnostics_generated.go
+++ b/internal/diagnostics/diagnostics_generated.go
@@ -4274,8 +4274,6 @@ var X_1_implementation = &Message{code: 100008, category: CategoryMessage, key:
 
 var Set_the_number_of_projects_to_build_concurrently = &Message{code: 100009, category: CategoryMessage, key: "Set_the_number_of_projects_to_build_concurrently_100009", text: "Set the number of projects to build concurrently."}
 
-var X_all_unless_singleThreaded_is_passed = &Message{code: 100010, category: CategoryMessage, key: "all_unless_singleThreaded_is_passed_100010", text: "all, unless --singleThreaded is passed."}
-
 var Deduplicate_packages_with_the_same_name_and_version = &Message{code: 100011, category: CategoryMessage, key: "Deduplicate_packages_with_the_same_name_and_version_100011", text: "Deduplicate packages with the same name and version."}
 
 func keyToMessage(key Key) *Message {
@@ -8552,8 +8550,6 @@ func keyToMessage(key Key) *Message {
 		return X_1_implementation
 	case "Set_the_number_of_projects_to_build_concurrently_100009":
 		return Set_the_number_of_projects_to_build_concurrently
-	case "all_unless_singleThreaded_is_passed_100010":
-		return X_all_unless_singleThreaded_is_passed
 	case "Deduplicate_packages_with_the_same_name_and_version_100011":
 		return Deduplicate_packages_with_the_same_name_and_version
 	default:
diff --git a/internal/diagnostics/extraDiagnosticMessages.json b/internal/diagnostics/extraDiagnosticMessages.json
index 2bd4640ff26..287ab6e4499 100644
--- a/internal/diagnostics/extraDiagnosticMessages.json
+++ b/internal/diagnostics/extraDiagnosticMessages.json
@@ -39,10 +39,6 @@
 		"category": "Message",
 		"code": 100009
 	},
-	"all, unless --singleThreaded is passed.": {
-		"category": "Message",
-		"code": 100010
-	},
 	"Non-relative paths are not allowed. Did you forget a leading './'?": {
 		"category": "Error",
 		"code": 5090
diff --git a/internal/execute/build/buildtask.go b/internal/execute/build/buildtask.go
index 36008df36d6..3ead8e4fe2e 100644
--- a/internal/execute/build/buildtask.go
+++ b/internal/execute/build/buildtask.go
@@ -200,10 +200,6 @@ func (t *BuildTask) updateDownstream(orchestrator *Orchestrator, path tspath.Pat
 }
 
 func (t *BuildTask) compileAndEmit(orchestrator *Orchestrator, path tspath.Path) {
-	if orchestrator.buildSemaphore != nil {
-		orchestrator.buildSemaphore <- struct{}{}        // acquire slot
-		defer func() { <-orchestrator.buildSemaphore }() // release slot
-	}
 	t.errors = nil
 	if orchestrator.opts.Command.BuildOptions.Verbose.IsTrue() {
 		t.result.reportStatus(ast.NewCompilerDiagnostic(diagnostics.Building_project_0, orchestrator.relativeFileName(t.config)))
diff --git a/internal/execute/build/host.go b/internal/execute/build/host.go
index 8c72970cea1..99563863bdb 100644
--- a/internal/execute/build/host.go
+++ b/internal/execute/build/host.go
@@ -51,14 +51,15 @@ func (h *host) Trace(msg *diagnostics.Message, args ...any) {
 }
 
 func (h *host) GetSourceFile(opts ast.SourceFileParseOptions) *ast.SourceFile {
-	// Cache dts and json files as they will be reused
-	return h.sourceFiles.loadOrStoreNewIf(opts, h.host.GetSourceFile, func(value *ast.SourceFile) bool {
-		return value != nil && (tspath.IsDeclarationFileName(opts.FileName) || tspath.FileExtensionIs(opts.FileName, tspath.ExtensionJson))
-	})
+	if tspath.IsDeclarationFileName(opts.FileName) || tspath.FileExtensionIs(opts.FileName, tspath.ExtensionJson) {
+		// Cache dts and json files as they will be reused
+		return h.sourceFiles.loadOrStore(opts, h.host.GetSourceFile, false /* allowZero */)
+	}
+	return h.host.GetSourceFile(opts)
 }
 
 func (h *host) GetResolvedProjectReference(fileName string, path tspath.Path) *tsoptions.ParsedCommandLine {
-	return h.resolvedReferences.loadOrStoreNew(path, func(path tspath.Path) *tsoptions.ParsedCommandLine {
+	return h.resolvedReferences.loadOrStore(path, func(path tspath.Path) *tsoptions.ParsedCommandLine {
 		configStart := h.orchestrator.opts.Sys.Now()
 		// Wrap command line options in "compilerOptions" key to match tsconfig.json structure
 		var commandLineRaw *collections.OrderedMap[string, any]
@@ -71,7 +72,7 @@ func (h *host) GetResolvedProjectReference(fileName string, path tspath.Path) *t
 		configTime := h.orchestrator.opts.Sys.Now().Sub(configStart)
 		h.configTimes.Store(path, configTime)
 		return commandLine
-	})
+	}, true /* allowZero */)
 }
 
 func (h *host) ReadBuildInfo(config *tsoptions.ParsedCommandLine) *incremental.BuildInfo {
diff --git a/internal/execute/build/orchestrator.go b/internal/execute/build/orchestrator.go
index 13ce353c69c..c3658cec210 100644
--- a/internal/execute/build/orchestrator.go
+++ b/internal/execute/build/orchestrator.go
@@ -61,8 +61,6 @@ type Orchestrator struct {
 	tasks  *collections.SyncMap[tspath.Path, *BuildTask]
 	order  []string
 	errors []*ast.Diagnostic
-	// Semaphore to limit concurrent builds
-	buildSemaphore chan struct{}
 
 	errorSummaryReporter tsc.DiagnosticsReporter
 	watchStatusReporter  tsc.DiagnosticReporter
@@ -240,14 +238,9 @@ func (o *Orchestrator) Watch() {
 func (o *Orchestrator) updateWatch() {
 	oldCache := o.host.mTimes
 	o.host.mTimes = &collections.SyncMap[tspath.Path, time.Time]{}
-	wg := core.NewWorkGroup(o.opts.Command.CompilerOptions.SingleThreaded.IsTrue())
-	o.tasks.Range(func(path tspath.Path, task *BuildTask) bool {
-		wg.Queue(func() {
-			task.updateWatch(o, oldCache)
-		})
-		return true
+	o.rangeTask(func(path tspath.Path, task *BuildTask) {
+		task.updateWatch(o, oldCache)
 	})
-	wg.RunAndWait()
 }
 
 func (o *Orchestrator) resetCaches() {
@@ -263,20 +256,14 @@ func (o *Orchestrator) DoCycle() {
 	var needsConfigUpdate atomic.Bool
 	var needsUpdate atomic.Bool
 	mTimes := o.host.mTimes.Clone()
-	wg := core.NewWorkGroup(o.opts.Command.CompilerOptions.SingleThreaded.IsTrue())
-	o.tasks.Range(func(path tspath.Path, task *BuildTask) bool {
-		wg.Queue(func() {
-			if updateKind := task.hasUpdate(o, path); updateKind != updateKindNone {
-				needsUpdate.Store(true)
-				if updateKind == updateKindConfig {
-					needsConfigUpdate.Store(true)
-				}
+	o.rangeTask(func(path tspath.Path, task *BuildTask) {
+		if updateKind := task.hasUpdate(o, path); updateKind != updateKindNone {
+			needsUpdate.Store(true)
+			if updateKind == updateKindConfig {
+				needsConfigUpdate.Store(true)
 			}
-		})
-		// Watch for file changes
-		return true
+		}
 	})
-	wg.RunAndWait()
 
 	if !needsUpdate.Load() {
 		o.host.mTimes = mTimes
@@ -307,11 +294,9 @@ func (o *Orchestrator) buildOrClean() tsc.CommandLineResult {
 	var buildResult orchestratorResult
 	if len(o.errors) == 0 {
 		buildResult.statistics.Projects = len(o.Order())
-		if o.opts.Command.CompilerOptions.SingleThreaded.IsTrue() {
-			o.singleThreadedBuildOrClean(&buildResult)
-		} else {
-			o.multiThreadedBuildOrClean(&buildResult)
-		}
+		o.rangeTask(func(path tspath.Path, task *BuildTask) {
+			o.buildOrCleanProject(task, path, &buildResult)
+		})
 	} else {
 		// Circularity errors prevent any project from being built
 		buildResult.result.Status = tsc.ExitStatusProjectReferenceCycle_OutputsSkipped
@@ -325,25 +310,40 @@
 	return buildResult.result
 }
 
-func (o *Orchestrator) singleThreadedBuildOrClean(buildResult *orchestratorResult) {
-	// Go in the order since only one project can be built at a time so that random order isnt picked by work group creating deadlock
-	for _, config := range o.Order() {
+func (o *Orchestrator) rangeTask(f func(path tspath.Path, task *BuildTask)) {
+	numRoutines := 4
+	if o.opts.Command.CompilerOptions.SingleThreaded.IsTrue() {
+		numRoutines = 1
+	} else if builders := o.opts.Command.BuildOptions.Builders; builders != nil {
+		numRoutines = *builders
+	}
+
+	var currentTaskIndex atomic.Int64
+	getNextTask := func() (tspath.Path, *BuildTask, bool) {
+		index := int(currentTaskIndex.Add(1) - 1)
+		if index >= len(o.order) {
+			return "", nil, false
+		}
+		config := o.order[index]
 		path := o.toPath(config)
 		task := o.getTask(path)
-		o.buildOrCleanProject(task, path, buildResult)
+		return path, task, true
+	}
+	runTask := func() {
+		for path, task, ok := getNextTask(); ok; path, task, ok = getNextTask() {
+			f(path, task)
+		}
 	}
-}
 
-func (o *Orchestrator) multiThreadedBuildOrClean(buildResult *orchestratorResult) {
-	// Spin off the threads with waiting on upstream to build before actual project build
-	wg := core.NewWorkGroup(false)
-	o.tasks.Range(func(path tspath.Path, task *BuildTask) bool {
-		wg.Queue(func() {
-			o.buildOrCleanProject(task, path, buildResult)
-		})
-		return true
-	})
-	wg.RunAndWait()
+	if numRoutines == 1 {
+		runTask()
+	} else {
+		wg := core.NewWorkGroup(false)
+		for range numRoutines {
+			wg.Queue(runTask)
+		}
+		wg.RunAndWait()
+	}
 }
 
 func (o *Orchestrator) buildOrCleanProject(task *BuildTask, path tspath.Path, buildResult *orchestratorResult) {
@@ -398,9 +398,5 @@ func NewOrchestrator(opts Options) *Orchestrator {
 	} else {
 		orchestrator.errorSummaryReporter = tsc.CreateReportErrorSummary(opts.Sys, opts.Command.Locale(), opts.Command.CompilerOptions)
 	}
-	// If we want to build more than one project at a time, create a semaphore to limit concurrency
-	if builders := opts.Command.BuildOptions.Builders; builders != nil {
-		orchestrator.buildSemaphore = make(chan struct{}, *builders)
-	}
 	return orchestrator
 }
diff --git a/internal/execute/build/parseCache.go b/internal/execute/build/parseCache.go
index 73bc2636124..29268e6407b 100644
--- a/internal/execute/build/parseCache.go
+++ b/internal/execute/build/parseCache.go
@@ -6,27 +6,23 @@ import (
 	"github.com/microsoft/typescript-go/internal/collections"
 )
 
-type parseCacheEntry[V any] struct {
+type parseCacheEntry[V comparable] struct {
 	value V
 	mu    sync.Mutex
 }
 
-type parseCache[K comparable, V any] struct {
+type parseCache[K comparable, V comparable] struct {
 	entries collections.SyncMap[K, *parseCacheEntry[V]]
 }
 
-func (c *parseCache[K, V]) loadOrStoreNew(key K, parse func(K) V) V {
-	return c.loadOrStoreNewIf(key, parse, func(value V) bool { return true })
-}
-
-func (c *parseCache[K, V]) loadOrStoreNewIf(key K, parse func(K) V, canCacheValue func(V) bool) V {
+func (c *parseCache[K, V]) loadOrStore(key K, parse func(K) V, allowZero bool) V {
 	newEntry := &parseCacheEntry[V]{}
 	newEntry.mu.Lock()
 	defer newEntry.mu.Unlock()
 	if entry, loaded := c.entries.LoadOrStore(key, newEntry); loaded {
 		entry.mu.Lock()
 		defer entry.mu.Unlock()
-		if canCacheValue(entry.value) {
+		if allowZero || entry.value != *new(V) {
 			return entry.value
 		}
 		newEntry = entry
diff --git a/internal/tsoptions/declsbuild.go b/internal/tsoptions/declsbuild.go
index 1c284c313a8..eb7e02f1e7b 100644
--- a/internal/tsoptions/declsbuild.go
+++ b/internal/tsoptions/declsbuild.go
@@ -54,7 +54,7 @@ var OptionsForBuild = []*CommandLineOption{
 		Kind:                    CommandLineOptionTypeNumber,
 		Category:                diagnostics.Command_line_Options,
 		Description:             diagnostics.Set_the_number_of_projects_to_build_concurrently,
-		DefaultValueDescription: diagnostics.X_all_unless_singleThreaded_is_passed,
+		DefaultValueDescription: diagnostics.X_4_unless_singleThreaded_is_passed,
 		minValue:                1,
 	},
 	{
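
Note on the orchestrator change above: rangeTask replaces both the per-build semaphore and the separate singleThreaded/multiThreaded code paths. It walks o.order (the precomputed project build order) with an atomic cursor and hands each claimed task to one of a fixed number of workers: 1 when --singleThreaded is set, --builders when given, otherwise 4. The sketch below is a minimal standalone illustration of that bounded worker-pool shape only; it uses plain goroutines and hypothetical names (processInOrder, process) rather than the real core.WorkGroup, BuildTask, and Orchestrator types.

package main

import (
	"fmt"
	"sync"
	"sync/atomic"
)

// processInOrder is a hypothetical stand-in for Orchestrator.rangeTask: a fixed
// number of workers claim indices from a shared atomic cursor, so tasks are
// started in slice order even though they run concurrently.
func processInOrder(tasks []string, numWorkers int, process func(string)) {
	var cursor atomic.Int64
	next := func() (string, bool) {
		i := int(cursor.Add(1) - 1) // claim the next index exactly once
		if i >= len(tasks) {
			return "", false
		}
		return tasks[i], true
	}

	var wg sync.WaitGroup
	for range numWorkers {
		wg.Add(1)
		go func() {
			defer wg.Done()
			for t, ok := next(); ok; t, ok = next() {
				process(t)
			}
		}()
	}
	wg.Wait()
}

func main() {
	// Hypothetical project build order; two workers drain it concurrently.
	tasks := []string{"core", "lib", "app"}
	processInOrder(tasks, 2, func(t string) { fmt.Println("building", t) })
}

Claiming indices from a shared cursor keeps tasks starting in build order, which is presumably what lets the bounded pool replace both the semaphore and the ordered single-threaded loop without the deadlock the removed comment warned about.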
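Note on the parseCache change: the canCacheValue predicate becomes an allowZero flag, and V is now constrained to comparable so the cache itself can test entry.value != *new(V), i.e. whether the producer returned the zero value (a nil *ast.SourceFile in the GetSourceFile path above). Below is a rough standalone sketch of that load-or-compute-once pattern under the same per-entry-mutex scheme; the names (onceCache, compute) are hypothetical, it uses sync.Map instead of collections.SyncMap, and the tail of the real loadOrStore is not shown in the hunk, so the compute-and-store step here is assumed.

package main

import (
	"fmt"
	"sync"
)

// entry is a hypothetical analogue of parseCacheEntry: its mutex is held by
// whichever goroutine is currently producing (or reusing) the value.
type entry[V comparable] struct {
	value V
	mu    sync.Mutex
}

// onceCache is a hypothetical, simplified analogue of parseCache.
type onceCache[K comparable, V comparable] struct {
	entries sync.Map // K -> *entry[V]
}

// loadOrStore returns the cached value for key, computing it at most once per
// concurrent wave of callers. When allowZero is false, a cached zero value is
// treated as "not cacheable" and is recomputed by the next caller.
func (c *onceCache[K, V]) loadOrStore(key K, compute func(K) V, allowZero bool) V {
	fresh := &entry[V]{}
	fresh.mu.Lock()
	defer fresh.mu.Unlock()
	if existing, loaded := c.entries.LoadOrStore(key, fresh); loaded {
		e := existing.(*entry[V])
		e.mu.Lock() // wait for any in-flight producer for this key
		defer e.mu.Unlock()
		if allowZero || e.value != *new(V) {
			return e.value
		}
		fresh = e // cached zero value: recompute into the shared entry
	}
	fresh.value = compute(key)
	return fresh.value
}

func main() {
	var c onceCache[string, int]
	fmt.Println(c.loadOrStore("a", func(string) int { return 42 }, false)) // computes 42
	fmt.Println(c.loadOrStore("a", func(string) int { return 7 }, false))  // reuses 42
}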