feat: streaming support #537
Conversation
Bundle Size Analysis
@birkskyum in the works fyi

@harlan-zw sounds good - looking forward to React and Solid support for this, as streaming is a must-have for adoption.

This is mostly working for React; I just don't fully understand how the suspense boundaries are being resolved and can't hook in.

Hi @harlan-zw!
Force-pushed 675bb06 to 4451fda
```js
app.use(async (req, res) => {
  const url = req.originalUrl

  let template, render
  if (!isProd) {
    template = fs.readFileSync(resolve('index.html'), 'utf-8')
    template = await vite.transformIndexHtml(url, template)
    render = (await vite.ssrLoadModule('/src/entry-server.ts')).render
  }
  else {
    template = indexProd
    render = (await import('./dist/server/entry-server.js')).render
  }

  const { svelteStream, head } = render(url)

  res.status(200).set({ 'Content-Type': 'text/html; charset=utf-8' })

  // Client script is injected via Vite plugin's transformIndexHtml
  for await (const chunk of streamWithHead(svelteStream, template, head, { debug: true })) {
    if (res.closed) break
    res.write(chunk)
  }
  res.end()
})
```
Check failure (Code scanning / CodeQL): Missing rate limiting
Copilot Autofix (26 days ago)
In general, the way to fix this problem is to introduce rate limiting for the HTTP routes that perform expensive operations, using a middleware such as express-rate-limit. The middleware should be applied before the relevant route handler so that excessive requests from any given client are rejected or delayed, preventing the server from being overwhelmed by repeated expensive work (like filesystem access).
For this specific file, the best minimal fix is to add express-rate-limit and use it as middleware on the app instance before the catch-all `app.use(async (req, res) => { ... })` handler. We will:

- Import or require `express-rate-limit` at the top of the file (while preserving existing imports).
- Create a limiter instance (for example, 100 requests per 15 minutes per IP, matching the background example) after the Express app is created (`const app = express()`).
- Apply `app.use(limiter)` before the main `app.use(async (req, res) => { ... })` handler, so all incoming requests are subject to rate limiting.
All changes will be confined to examples/vite-ssr-svelte-streaming/server.js. We will not alter existing logic around Vite, template rendering, or streaming; only add the import, limiter definition, and app.use(limiter) line.
```diff
@@ -2,6 +2,7 @@
 import path from 'node:path'
 import { fileURLToPath } from 'node:url'
 import express from 'express'
+import rateLimit from 'express-rate-limit'
 import { streamWithHead } from '@unhead/svelte/stream/server'

 const isTest = process.env.NODE_ENV === 'test' || !!process.env.VITE_TEST_BUILD
@@ -20,6 +21,13 @@

 const app = express()

+const limiter = rateLimit({
+  windowMs: 15 * 60 * 1000, // 15 minutes
+  max: 100, // limit each IP to 100 requests per windowMs
+})
+
+app.use(limiter)
+
 let vite
 if (!isProd) {
   vite = await (
```
```diff
@@ -15,7 +15,8 @@
     "@unhead/svelte": "workspace:*",
     "compression": "^1.8.1",
     "express": "^5.2.1",
-    "sirv": "^3.0.2"
+    "sirv": "^3.0.2",
+    "express-rate-limit": "^8.2.1"
   },
   "devDependencies": {
     "@playwright/test": "^1.57.0",
```
| Package | Version | Security advisories |
| --- | --- | --- |
| express-rate-limit (npm) | 8.2.1 | None |
```js
app.use('*all', async (req, res) => {
  try {
    const url = req.originalUrl.replace(base, '')

    /** @type {string} */
    let template
    /** @type {import('./src/entry-server.tsx').render} */
    let render
    if (!isProduction) {
      // Always read fresh template in development
      template = await fs.readFile('./index.html', 'utf-8')
      template = await vite.transformIndexHtml(url, template)
      render = (await vite.ssrLoadModule('/src/entry-server.tsx')).render
    } else {
      template = templateHtml
      render = (await import('./dist/server/entry-server.js')).render
    }

    let didError = false

    const { pipe, abort, head } = render(url, {
      onShellError() {
        res.status(500)
        res.set({ 'Content-Type': 'text/html' })
        res.send('<h1>Something went wrong</h1>')
      },
      async onShellReady() {
        res.status(didError ? 500 : 200)
        res.set({ 'Content-Type': 'text/html' })

        // Inject streaming head support
        let processedTemplate = template
        if (head) {
          processedTemplate = await renderSSRHeadShell(head, template)
        }

        const [htmlStart, htmlEnd] = processedTemplate.split(`<!--app-html-->`)
        let htmlEnded = false

        const transformStream = new Transform({
          transform(chunk, encoding, callback) {
            console.log('chunk', chunk)
            // See entry-server.tsx for more details of this code
            if (!htmlEnded) {
              chunk = chunk.toString()
              if (chunk.endsWith('<vite-streaming-end></vite-streaming-end>')) {
                res.write(chunk.slice(0, -41) + htmlEnd, 'utf-8')
              } else {
                res.write(chunk, 'utf-8')
              }
            } else {
              res.write(chunk, encoding)
            }
            callback()
          },
        })

        transformStream.on('finish', () => {
          res.end()
        })

        res.write(htmlStart)

        pipe(transformStream)
      },
      onError(error) {
        didError = true
        console.error(error)
      },
    })

    setTimeout(() => {
      abort()
    }, ABORT_DELAY)
  } catch (e) {
    vite?.ssrFixStacktrace(e)
    console.log(e.stack)
    res.status(500).end(e.stack)
  }
})
```
Check failure (Code scanning / CodeQL): Missing rate limiting
Copilot Autofix (25 days ago)
In general, to fix missing rate limiting in an Express application, you introduce a rate-limiting middleware (for example, express-rate-limit) and apply it to the relevant routes or the whole app. The middleware enforces a maximum number of requests from a given client within a time window, helping protect expensive handlers from abuse and DoS.
For this file, the best fix with minimal behavioral change is:
- Import and configure a rate limiter once at the top level.
- Apply the rate limiter middleware only to the expensive HTML-serving route, i.e., the `app.use('*all', async (req, res) => { ... })` handler, rather than globally, to avoid unintended side effects on other middleware such as Vite or static serving.
- Keep the existing handler logic unchanged; only wrap it with the limiter.
Concretely:
- Add an import (or `require`) for a well-known rate limiting library. Since this file uses ES module syntax (`import`), we can use dynamic import inside an `async` IIFE or top-level `await`. However, the instructions allow adding imports of well-known external libraries, and the file already uses top-level `await` for `fs.readFile` and dynamic `import` for `vite`, `compression`, and `sirv`, so we can follow the same pattern: dynamically import `express-rate-limit` when setting up the app.
- Define a limiter instance, e.g.:

  ```js
  const { default: rateLimit } = await import('express-rate-limit')
  const ssrLimiter = rateLimit({
    windowMs: 15 * 60 * 1000,
    max: 100,
  })
  ```

  or equivalent.
- Apply `ssrLimiter` to the SSR route by changing `app.use('*all', async (req, res) => {` to `app.use('*all', ssrLimiter, async (req, res) => {`. This ensures that rate limiting is enforced before the expensive operations in this handler while leaving other middleware behavior intact.
All changes are confined to examples/vite-ssr-react-streaming-simple/server.js: one new dynamic import/limiter definition in the setup section, and one modification of the app.use('*all', ...) call to insert the middleware.
```diff
@@ -17,6 +17,13 @@
 // Create http server
 const app = express()

+// Rate limiting for SSR route
+const { default: rateLimit } = await import('express-rate-limit')
+const ssrLimiter = rateLimit({
+  windowMs: 15 * 60 * 1000, // 15 minutes
+  max: 100, // limit each IP to 100 requests per windowMs
+})
+
 // Add Vite or respective production middlewares
 /** @type {import('vite').ViteDevServer | undefined} */
 let vite
@@ -36,7 +43,7 @@
 }

 // Serve HTML
-app.use('*all', async (req, res) => {
+app.use('*all', ssrLimiter, async (req, res) => {
   try {
     const url = req.originalUrl.replace(base, '')
```
```diff
@@ -16,7 +16,8 @@
     "express": "^5.2.1",
     "react": "^19.2.1",
     "react-dom": "^19.2.1",
-    "sirv": "^3.0.2"
+    "sirv": "^3.0.2",
+    "express-rate-limit": "^8.2.1"
   },
   "devDependencies": {
     "@types/express": "^5.0.6",
```
| Package | Version | Security advisories |
| --- | --- | --- |
| express-rate-limit (npm) | 8.2.1 | None |
```js
} catch (e) {
  vite?.ssrFixStacktrace(e)
  console.log(e.stack)
  res.status(500).end(e.stack)
```
Check warning (Code scanning / CodeQL): Exception text reinterpreted as HTML
Copilot Autofix (25 days ago)
To fix the problem, avoid sending the raw exception stack trace directly to the HTTP response in a way that the browser interprets as HTML. Instead, send a generic error message (optionally with a 500 status code) and log the detailed error, including its stack trace, only on the server. If you still need to expose some error information to the client, you must HTML-escape it before sending, or use a non-HTML content type such as text/plain with proper escaping; however, for production SSR errors, a generic message is usually preferred.
The minimal, behavior-preserving (from an API-contract perspective) and safe change here is:
- In the catch block (`catch (e) { ... }`), keep `vite?.ssrFixStacktrace(e)` and `console.log(e.stack)` as they are (for server-side debugging).
- Replace `res.status(500).end(e.stack)` with a response that does not reveal the stack trace, such as `res.status(500).send('Internal Server Error')`, or, if you want to be consistent with the earlier shell error handler, an HTML snippet like `<h1>Something went wrong</h1>`. Both options avoid reflecting any user-controlled content.
Concretely, in examples/vite-ssr-react-streaming-simple/server.js, modify the lines 113-117 catch block so that the only thing sent to the client is a generic message and the status code remains 500. No new imports or helper functions are strictly needed for this simple fix.
```diff
@@ -113,7 +113,7 @@
   } catch (e) {
     vite?.ssrFixStacktrace(e)
     console.log(e.stack)
-    res.status(500).end(e.stack)
+    res.status(500).send('<h1>Something went wrong</h1>')
   }
 })
```
```js
} catch (e) {
  vite?.ssrFixStacktrace(e)
  console.log(e.stack)
  res.status(500).end(e.stack)
```
Check warning (Code scanning / CodeQL): Information exposure through a stack trace
Copilot Autofix (25 days ago)
In general, the fix is to stop including the stack trace (or any detailed exception object) in HTTP responses, and instead log it only on the server while sending a generic, user-friendly error message to the client. This prevents disclosure of internal file paths, implementation details, and potentially sensitive data embedded in error messages.
In this file, the necessary change is confined to the catch (e) { ... } block of the app.use('*all', ...) middleware. We should keep or improve the server-side logging (e.g., log e and/or e.stack), but replace res.status(500).end(e.stack) with a generic error message that does not reveal implementation details. To avoid changing existing functionality more than necessary, we can keep the HTTP 500 status code and just change the response body. A reasonable minimal change is:
- Leave `vite?.ssrFixStacktrace(e)` as-is (it only adjusts the stack trace for server-side logging / debugging).
- Optionally keep `console.log(e.stack)` or upgrade it to `console.error(e)`; this does not affect the client.
- Change `res.status(500).end(e.stack)` to `res.status(500).end('Internal Server Error')` (or similarly generic text).
All edits are within examples/vite-ssr-react-streaming-simple/server.js, and no new imports or helpers are required.
```diff
@@ -113,7 +113,7 @@
   } catch (e) {
     vite?.ssrFixStacktrace(e)
     console.log(e.stack)
-    res.status(500).end(e.stack)
+    res.status(500).end('Internal Server Error')
   }
 })
```
Really excited about this work - thanks for looking into it!
Force-pushed 5cb1c27 to 618b946

Force-pushed 618b946 to c609271
Initial version is available in 3.0.0-beta.5 with support for React, Vue, Solid.js, and Svelte. They all require Vite for the time being.
Linked issue
#396
Type of change
Description
Updates head tags dynamically as suspense boundaries resolve during streaming SSR.
How it works
The streaming system uses a queue-based pattern where the server renders head entries (title, meta, links) and immediately emits them as inline `<script>` tags that push data to a global queue.

On the client side, a bootstrap script creates a queue (`window.__unhead__`) that collects these entries as they stream in before the main JavaScript bundle loads. Once the client head instance initializes, it processes all queued entries and takes over future pushes directly.

This approach solves the race condition between streaming chunks arriving and client JS loading, ensuring no head entries are ever lost regardless of timing, and allows unhead core to dedupe and sort as needed.
1. Shell injects queue stub
2. Suspense chunks push updates
3. IIFE consumes queue & renders to DOM
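
To make the handoff concrete, here is a minimal sketch of the pattern described above. Only `window.__unhead__` comes from the description; `hydrateQueue` and the entry shape are illustrative assumptions, not unhead's actual implementation.

```js
// 1. Shell: the bootstrap stub inlined in <head> before any bundle loads
//    simply ensures a plain array exists, so pushes can never be lost:
//      window.__unhead__ = window.__unhead__ || []
//
// 2. Each resolved suspense boundary streams an inline <script> that pushes
//    its head entries onto that queue, e.g.:
//      window.__unhead__.push({ title: 'Post title', meta: [{ name: 'description', content: 'Post summary' }] })
//
// 3. Once the client head instance initializes, it drains the queue and then
//    takes over future pushes directly (hypothetical helper, not unhead code):
function hydrateQueue(head) {
  const queued = Array.isArray(window.__unhead__) ? window.__unhead__ : []
  queued.forEach(entry => head.push(entry)) // flush everything that streamed in early
  window.__unhead__ = { push: entry => head.push(entry) } // later pushes go straight to the head
}
```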
Example (Vue)
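A minimal sketch of what component-level usage can look like with `@unhead/vue`: an async component resolved inside `<Suspense>` calls `useHead` once its data is ready, so its head entry ships with that component's streamed chunk. The route, fetch call, and data shape here are illustrative assumptions.

```vue
<script setup>
import { useHead } from '@unhead/vue'

// Hypothetical async data fetch; the component suspends until it resolves.
const post = await fetch('/api/posts/1').then(r => r.json())

// Runs once the boundary resolves, so during streaming SSR this entry is
// emitted with the component's chunk and queued on the client until hydration.
useHead({
  title: post.title,
  meta: [{ name: 'description', content: post.description }],
})
</script>

<template>
  <article>
    <h1>{{ post.title }}</h1>
    <p>{{ post.description }}</p>
  </article>
</template>
```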
Documentation
Related PRs
- `resolveTags()` #622
- `renderDOMHead()` #628
- `renderSSRHead()` #629