mirror of https://github.com/alrayyes/wiki.git
fix rebuild debouncing

This commit is contained in:
parent 07a327e05a
commit 0998bc355e

3 changed files with 42 additions and 37 deletions
@@ -4,9 +4,8 @@ draft: true
 
 ## todo
 
-- debounce cfg rebuild on large repos
 - investigate content rebuild triggering multiple times even when debounced, causing an esbuild deadlock
 - dereference symlink for npx quartz sync
 - prompt user as to whether to do it (it's expensive for large vaults)
 
 ## high priority backlog
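The removed todo item is what this commit implements: config rebuilds are now debounced on the trailing edge, so a burst of file events collapses into a single build. A minimal sketch of the pattern, assuming a `build()` function (the `Set` of timer ids and the 250ms delay mirror the diffs below; the `clear()` call is illustrative and not part of the commit):

```ts
// Trailing-edge debounce sketch: cancel every pending timer, then schedule
// a single build 250ms after the most recent change event.
const timeoutIds = new Set<ReturnType<typeof setTimeout>>()

function scheduleRebuild(build: () => Promise<void>) {
  timeoutIds.forEach((id) => clearTimeout(id))
  timeoutIds.clear() // illustrative: drop stale ids (the commit itself keeps them)
  timeoutIds.add(
    setTimeout(() => {
      void build()
    }, 250),
  )
}
```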
@@ -355,6 +355,7 @@ See the [documentation](https://quartz.jzhao.xyz) for how to get started.
   ],
 })
 
+const timeoutIds = new Set()
 const build = async (clientRefresh) => {
   const result = await ctx.rebuild().catch((err) => {
     console.error(`${chalk.red("Couldn't parse Quartz configuration:")} ${fp}`)
@@ -380,6 +381,11 @@ See the [documentation](https://quartz.jzhao.xyz) for how to get started.
   clientRefresh()
 }
 
+const rebuild = (clientRefresh) => {
+  timeoutIds.forEach((id) => clearTimeout(id))
+  timeoutIds.add(setTimeout(() => build(clientRefresh), 250))
+}
+
 if (argv.serve) {
   const wss = new WebSocketServer({ port: 3001 })
   const connections = []
@@ -457,7 +463,7 @@ See the [documentation](https://quartz.jzhao.xyz) for how to get started.
     })
     .on("all", async () => {
       console.log(chalk.yellow("Detected a source code change, doing a hard rebuild..."))
-      await build(clientRefresh)
+      rebuild(clientRefresh)
     })
 } else {
   await build(() => {})
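To see why swapping `await build(clientRefresh)` for `rebuild(clientRefresh)` matters, here is a self-contained sketch of the new behavior (stub `build` and logging are hypothetical; my reading of the todo note is that overlapping `ctx.rebuild()` calls were what wedged esbuild):

```ts
// Names mirror the diff above; build() is a stub standing in for the real one.
const timeoutIds = new Set<ReturnType<typeof setTimeout>>()
const build = async (refresh: () => void) => {
  console.log("building...")
  refresh()
}
const rebuild = (refresh: () => void) => {
  timeoutIds.forEach((id) => clearTimeout(id))
  timeoutIds.add(setTimeout(() => build(refresh), 250))
}

// Three rapid events arm three timers, but only the last one survives,
// so build() runs exactly once, 250ms after the final event.
rebuild(() => console.log("refresh"))
rebuild(() => console.log("refresh"))
rebuild(() => console.log("refresh"))
```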
@@ -77,7 +77,7 @@ async function startServing(
   }
 
   const initialSlugs = ctx.allSlugs
-  let timeoutId: ReturnType<typeof setTimeout> | null = null
+  let timeoutIds: Set<ReturnType<typeof setTimeout>> = new Set()
   let toRebuild: Set<FilePath> = new Set()
   let toRemove: Set<FilePath> = new Set()
   let trackedAssets: Set<FilePath> = new Set()
@@ -106,45 +106,45 @@ async function startServing(
       toRemove.add(filePath)
     }
 
-    if (timeoutId) {
-      clearTimeout(timeoutId)
-    }
+    timeoutIds.forEach((id) => clearTimeout(id))
 
     // debounce rebuilds every 250ms
-    timeoutId = setTimeout(async () => {
-      const perf = new PerfTimer()
-      console.log(chalk.yellow("Detected change, rebuilding..."))
-      try {
-        const filesToRebuild = [...toRebuild].filter((fp) => !toRemove.has(fp))
-
-        const trackedSlugs = [...new Set([...contentMap.keys(), ...toRebuild, ...trackedAssets])]
-          .filter((fp) => !toRemove.has(fp))
-          .map((fp) => slugifyFilePath(path.posix.relative(argv.directory, fp) as FilePath))
-
-        ctx.allSlugs = [...new Set([...initialSlugs, ...trackedSlugs])]
-        const parsedContent = await parseMarkdown(ctx, filesToRebuild)
-        for (const content of parsedContent) {
-          const [_tree, vfile] = content
-          contentMap.set(vfile.data.filePath!, content)
-        }
-
-        for (const fp of toRemove) {
-          contentMap.delete(fp)
-        }
-
-        await rimraf(argv.output)
-        const parsedFiles = [...contentMap.values()]
-        const filteredContent = filterContent(ctx, parsedFiles)
-        await emitContent(ctx, filteredContent)
-        console.log(chalk.green(`Done rebuilding in ${perf.timeSince()}`))
-      } catch {
-        console.log(chalk.yellow(`Rebuild failed. Waiting on a change to fix the error...`))
-      }
-
-      clientRefresh()
-      toRebuild.clear()
-      toRemove.clear()
-    }, 250)
+    timeoutIds.add(
+      setTimeout(async () => {
+        const perf = new PerfTimer()
+        console.log(chalk.yellow("Detected change, rebuilding..."))
+        try {
+          const filesToRebuild = [...toRebuild].filter((fp) => !toRemove.has(fp))
+
+          const trackedSlugs = [...new Set([...contentMap.keys(), ...toRebuild, ...trackedAssets])]
+            .filter((fp) => !toRemove.has(fp))
+            .map((fp) => slugifyFilePath(path.posix.relative(argv.directory, fp) as FilePath))
+
+          ctx.allSlugs = [...new Set([...initialSlugs, ...trackedSlugs])]
+          const parsedContent = await parseMarkdown(ctx, filesToRebuild)
+          for (const content of parsedContent) {
+            const [_tree, vfile] = content
+            contentMap.set(vfile.data.filePath!, content)
+          }
+
+          for (const fp of toRemove) {
+            contentMap.delete(fp)
+          }
+
+          await rimraf(argv.output)
+          const parsedFiles = [...contentMap.values()]
+          const filteredContent = filterContent(ctx, parsedFiles)
+          await emitContent(ctx, filteredContent)
+          console.log(chalk.green(`Done rebuilding in ${perf.timeSince()}`))
+        } catch {
+          console.log(chalk.yellow(`Rebuild failed. Waiting on a change to fix the error...`))
+        }
+
+        clientRefresh()
+        toRebuild.clear()
+        toRemove.clear()
+      }, 250),
+    )
   }
 
   const watcher = chokidar.watch(".", {
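The same pattern lands in `startServing`: the single nullable `timeoutId` becomes a `Set`, and the cancel step clears every pending id before arming a new 250ms timer. Extracted into a generic helper, the pattern looks roughly like this (a sketch, not code from the commit):

```ts
// Generic form of the pattern used in both files: collapse a burst of calls
// into one invocation of fn, waitMs after the last call.
function debounce<A extends unknown[]>(fn: (...args: A) => void, waitMs: number) {
  const ids = new Set<ReturnType<typeof setTimeout>>()
  return (...args: A) => {
    ids.forEach((id) => clearTimeout(id))
    ids.clear() // the commit keeps fired ids around; clearing is a tidier variant
    ids.add(setTimeout(() => fn(...args), waitMs))
  }
}

// Usage: const onChange = debounce(() => console.log("rebuild"), 250)
```

Note that the diffs themselves never remove ids from the set after a timer fires; `clearTimeout` on an already-fired id is a no-op, so correctness holds, though the set grows by one entry per change event.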