diff --git a/quartz/bootstrap-cli.mjs b/quartz/bootstrap-cli.mjs
index 47c58ab..1656d75 100755
--- a/quartz/bootstrap-cli.mjs
+++ b/quartz/bootstrap-cli.mjs
@@ -393,10 +393,17 @@ See the [documentation](https://quartz.jzhao.xyz) for how to get started.
     })
 
     const buildMutex = new Mutex()
-    const timeoutIds = new Set()
+    let lastBuildMs = 0 // timestamp of the most recent build request
     let cleanupBuild = null
     const build = async (clientRefresh) => {
+      const buildStart = new Date().getTime()
+      lastBuildMs = buildStart
       const release = await buildMutex.acquire()
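+      // a newer build was triggered while we waited for the lock; skip this stale one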
+      if (lastBuildMs > buildStart) {
+        release()
+        return
+      }
 
       if (cleanupBuild) {
         await cleanupBuild()
@@ -428,12 +435,6 @@ See the [documentation](https://quartz.jzhao.xyz) for how to get started.
       clientRefresh()
     }
 
-    const rebuild = (clientRefresh) => {
-      timeoutIds.forEach((id) => clearTimeout(id))
-      timeoutIds.clear()
-      timeoutIds.add(setTimeout(() => build(clientRefresh), 250))
-    }
-
     if (argv.serve) {
       const connections = []
       const clientRefresh = () => connections.forEach((conn) => conn.send("rebuild"))
@@ -539,7 +540,7 @@ See the [documentation](https://quartz.jzhao.xyz) for how to get started.
           ignoreInitial: true,
         })
         .on("all", async () => {
-          rebuild(clientRefresh)
+          build(clientRefresh)
         })
     } else {
       await build(() => {})
diff --git a/quartz/build.ts b/quartz/build.ts
index 8b1d318..58137d1 100644
--- a/quartz/build.ts
+++ b/quartz/build.ts
@@ -81,7 +81,7 @@ async function startServing(
   }
 
   const initialSlugs = ctx.allSlugs
-  const timeoutIds: Set<ReturnType<typeof setTimeout>> = new Set()
+  let lastBuildMs = 0 // timestamp of the most recent rebuild request
   const toRebuild: Set<FilePath> = new Set()
   const toRemove: Set<FilePath> = new Set()
   const trackedAssets: Set<FilePath> = new Set()
@@ -111,49 +111,50 @@ async function startServing(
     }
 
-    // debounce rebuilds every 250ms
-    timeoutIds.add(
-      setTimeout(async () => {
-        const release = await mut.acquire()
-        timeoutIds.forEach((id) => clearTimeout(id))
-        timeoutIds.clear()
-
-        const perf = new PerfTimer()
-        console.log(chalk.yellow("Detected change, rebuilding..."))
-        try {
-          const filesToRebuild = [...toRebuild].filter((fp) => !toRemove.has(fp))
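+    // instead of debouncing, rebuilds serialize on the mutex and the newest
+    // request wins: stale requests bail out right after acquiring the lock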
+    const buildStart = new Date().getTime()
+    lastBuildMs = buildStart
+    const release = await mut.acquire()
+    if (lastBuildMs > buildStart) {
+      release()
+      return
+    }
 
-          const trackedSlugs = [...new Set([...contentMap.keys(), ...toRebuild, ...trackedAssets])]
-            .filter((fp) => !toRemove.has(fp))
-            .map((fp) => slugifyFilePath(path.posix.relative(argv.directory, fp) as FilePath))
+    const perf = new PerfTimer()
+    console.log(chalk.yellow("Detected change, rebuilding..."))
+    try {
+      const filesToRebuild = [...toRebuild].filter((fp) => !toRemove.has(fp))
 
-          ctx.allSlugs = [...new Set([...initialSlugs, ...trackedSlugs])]
-          const parsedContent = await parseMarkdown(ctx, filesToRebuild)
-          for (const content of parsedContent) {
-            const [_tree, vfile] = content
-            contentMap.set(vfile.data.filePath!, content)
-          }
+      const trackedSlugs = [...new Set([...contentMap.keys(), ...toRebuild, ...trackedAssets])]
+        .filter((fp) => !toRemove.has(fp))
+        .map((fp) => slugifyFilePath(path.posix.relative(argv.directory, fp) as FilePath))
 
-          for (const fp of toRemove) {
-            contentMap.delete(fp)
-          }
+      ctx.allSlugs = [...new Set([...initialSlugs, ...trackedSlugs])]
+      const parsedContent = await parseMarkdown(ctx, filesToRebuild)
+      for (const content of parsedContent) {
+        const [_tree, vfile] = content
+        contentMap.set(vfile.data.filePath!, content)
+      }
 
-          // TODO: we can probably traverse the link graph to figure out what's safe to delete here
-          // instead of just deleting everything
-          await rimraf(argv.output)
-          const parsedFiles = [...contentMap.values()]
-          const filteredContent = filterContent(ctx, parsedFiles)
-          await emitContent(ctx, filteredContent)
-          console.log(chalk.green(`Done rebuilding in ${perf.timeSince()}`))
-        } catch {
-          console.log(chalk.yellow(`Rebuild failed. Waiting on a change to fix the error...`))
-        }
+      for (const fp of toRemove) {
+        contentMap.delete(fp)
+      }
 
-        clientRefresh()
-        toRebuild.clear()
-        toRemove.clear()
-        release()
-      }, 250),
-    )
+      const parsedFiles = [...contentMap.values()]
+      const filteredContent = filterContent(ctx, parsedFiles)
+      // TODO: we can probably traverse the link graph to figure out what's safe to delete here
+      // instead of just deleting everything
+      await rimraf(argv.output)
+      await emitContent(ctx, filteredContent)
+      console.log(chalk.green(`Done rebuilding in ${perf.timeSince()}`))
+    } catch {
+      console.log(chalk.yellow(`Rebuild failed. Waiting on a change to fix the error...`))
+    }
+
+    clientRefresh()
+    toRebuild.clear()
+    toRemove.clear()
+    release()
   }
 
   const watcher = chokidar.watch(".", {
@@ -168,7 +169,6 @@ async function startServing(
     .on("unlink", (fp) => rebuild(fp, "delete"))
 
   return async () => {
-    timeoutIds.forEach((id) => clearTimeout(id))
     await watcher.close()
   }
 }
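
The patch replaces the 250ms debounce timers with a "newest request wins" scheme: every build request stamps a shared timestamp before waiting on the mutex, and after acquiring the lock it checks whether a newer request has stamped in the meantime. Stale requests release the lock and return immediately, so a burst of file events queues on the mutex and only the newest queued request actually rebuilds once the in-flight build finishes. This also removes the timeout bookkeeping that previously had to be cleaned up on shutdown.

Below is a minimal sketch of the pattern in isolation. It assumes the async-mutex package (its acquire() resolves to a release function, matching the Mutex usage in the diff); coalescedRun, doWork, and lastRequestMs are illustrative names, not identifiers from the patch:

    import { Mutex } from "async-mutex"

    const mutex = new Mutex()
    let lastRequestMs = 0 // stamp of the newest request (the patch calls this lastBuildMs)

    async function coalescedRun(doWork: () => Promise<void>) {
      const requestMs = new Date().getTime()
      lastRequestMs = requestMs
      const release = await mutex.acquire()
      // while this call waited for the lock, a newer call may have re-stamped
      // lastRequestMs; if so, bail out and let that newer call do the work
      if (lastRequestMs > requestMs) {
        release()
        return
      }
      try {
        await doWork()
      } finally {
        release()
      }
    }

The sketch releases the lock in a finally block as a defensive default; the patched build.ts instead catches rebuild errors in its try/catch and calls release() manually afterwards, which is equivalent as long as every path reaches that call.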