Compare commits

..

1 Commits

Author SHA1 Message Date
Claude Bot
71f1057027 fix: use workspace path as cache key instead of name
Fixes a bug where workspaces with the same package name but in different
paths would incorrectly report "Workspace name already exists" errors.

The deduplication check was using the package name as the cache key,
which caused false positives when multiple workspace packages had the
same name but different paths (and potentially different versions).

Changed to use the workspace path as the cache key, which is the unique
identifier for each workspace. This aligns with how pnpm handles
workspace packages - allowing duplicate names with different versions.

🤖 Generated with [Claude Code](https://claude.com/claude-code)

Co-Authored-By: Claude <noreply@anthropic.com>
2025-10-11 23:36:32 +00:00
7 changed files with 98 additions and 214 deletions

View File

@@ -892,8 +892,6 @@ pub fn NewServer(protocol_enum: enum { http, https }, development_kind: enum { d
}
}
var fetch_headers_to_use: ?*WebCore.FetchHeaders = null;
if (optional) |opts| {
getter: {
if (opts.isEmptyOrUndefinedOrNull()) {
@@ -917,7 +915,7 @@ pub fn NewServer(protocol_enum: enum { http, https }, development_kind: enum { d
break :getter;
}
fetch_headers_to_use = headers_value.as(WebCore.FetchHeaders) orelse brk: {
var fetch_headers_to_use: *WebCore.FetchHeaders = headers_value.as(WebCore.FetchHeaders) orelse brk: {
if (headers_value.isObject()) {
if (try WebCore.FetchHeaders.createFromJS(globalThis, headers_value)) |fetch_headers| {
fetch_headers_to_deref = fetch_headers;
@@ -936,13 +934,17 @@ pub fn NewServer(protocol_enum: enum { http, https }, development_kind: enum { d
return error.JSError;
}
if (fetch_headers_to_use.?.fastGet(.SecWebSocketProtocol)) |protocol| {
if (fetch_headers_to_use.fastGet(.SecWebSocketProtocol)) |protocol| {
sec_websocket_protocol = protocol;
}
if (fetch_headers_to_use.?.fastGet(.SecWebSocketExtensions)) |protocol| {
if (fetch_headers_to_use.fastGet(.SecWebSocketExtensions)) |protocol| {
sec_websocket_extensions = protocol;
}
// we must write the status first so that 200 OK isn't written
resp.writeStatus("101 Switching Protocols");
fetch_headers_to_use.toUWSResponse(comptime ssl_enabled, resp);
}
if (globalThis.hasException()) {
@@ -951,31 +953,6 @@ pub fn NewServer(protocol_enum: enum { http, https }, development_kind: enum { d
}
}
var cookies_to_write: ?*WebCore.CookieMap = null;
if (upgrader.cookies) |cookies| {
upgrader.cookies = null;
cookies_to_write = cookies;
}
defer {
if (cookies_to_write) |cookies| {
cookies.deref();
}
}
// Write status, custom headers, and cookies in one place
if (fetch_headers_to_use != null or cookies_to_write != null) {
// we must write the status first so that 200 OK isn't written
resp.writeStatus("101 Switching Protocols");
if (fetch_headers_to_use) |headers| {
headers.toUWSResponse(comptime ssl_enabled, resp);
}
if (cookies_to_write) |cookies| {
try cookies.write(globalThis, ssl_enabled, @ptrCast(resp));
}
}
// --- After this point, do not throw an exception
// See https://github.com/oven-sh/bun/issues/1339

View File

@@ -1771,8 +1771,10 @@ pub fn Package(comptime SemverIntType: type) type {
defer seen_workspace_names.deinit(allocator);
for (workspace_names.values(), workspace_names.keys()) |entry, path| {
// workspace names from their package jsons. duplicates not allowed
const gop = try seen_workspace_names.getOrPut(allocator, @truncate(String.Builder.stringHash(entry.name)));
// workspace names from their package jsons
// Use path as cache key since each workspace path must be unique
// This allows multiple workspaces with the same name but different versions (like pnpm)
const gop = try seen_workspace_names.getOrPut(allocator, @truncate(String.Builder.stringHash(path)));
if (gop.found_existing) {
// this path does a lot of extra work to format the error message
// but this is ok because the install is going to fail anyway, so this

View File

@@ -341,7 +341,7 @@ pub fn writePreQuotedString(text_in: []const u8, comptime Writer: type, writer:
pub fn quoteForJSON(text: []const u8, bytes: *MutableString, comptime ascii_only: bool) !void {
const writer = bytes.writer();
try bytes.growIfNeeded(2 + text.len);
try bytes.growIfNeeded(estimateLengthForUTF8(text, ascii_only, '"'));
try bytes.appendChar('"');
try writePreQuotedString(text, @TypeOf(writer), writer, '"', ascii_only, true, .utf8);
bytes.appendChar('"') catch unreachable;

View File

@@ -1018,20 +1018,30 @@ pub const Resolver = struct {
debug.addNoteFmt("Resolved symlink \"{s}\" to \"{s}\"", .{ path.text, symlink_path });
}
} else if (dir.abs_real_path.len > 0) {
// When the directory is a symlink, we don't need to call getFdPath.
var parts = [_]string{ dir.abs_real_path, query.entry.base() };
var buf: bun.PathBuffer = undefined;
const out = r.fs.absBuf(&parts, &buf);
var out = r.fs.absBuf(&parts, &buf);
const store_fd = r.store_fd;
if (!query.entry.cache.fd.isValid() and store_fd) {
if (!query.entry.cache.fd.isValid()) {
buf[out.len] = 0;
const span = buf[0..out.len :0];
var file: bun.FD = .fromStdFile(try std.fs.openFileAbsoluteZ(span, .{ .mode = .read_only }));
query.entry.cache.fd = file;
Fs.FileSystem.setMaxFd(file.native());
var file: bun.FD = if (store_fd)
.fromStdFile(try std.fs.openFileAbsoluteZ(span, .{ .mode = .read_only }))
else
.fromStdFile(try bun.openFileForPath(span));
if (!store_fd) {
assert(file.stdioTag() == null);
out = try file.getFdPath(&buf);
file.close();
query.entry.cache.fd = .invalid;
} else {
query.entry.cache.fd = file;
Fs.FileSystem.setMaxFd(file.native());
}
}
defer {
@@ -1044,6 +1054,10 @@ pub const Resolver = struct {
}
}
if (store_fd) {
out = try bun.getFdPath(query.entry.cache.fd, &buf);
}
const symlink = try Fs.FileSystem.FilenameStore.instance.append(@TypeOf(out), out);
if (r.debug_logs) |*debug| {
debug.addNoteFmt("Resolved symlink \"{s}\" to \"{s}\"", .{ symlink, path.text });
@@ -2828,38 +2842,31 @@ pub const Resolver = struct {
// directory. The "pnpm" package manager generates a faulty "NODE_PATH"
// list which contains such paths and treating them as missing means we just
// ignore them during path resolution.
error.ENOTDIR, error.IsDir, error.NotDir => return null,
error.ENOENT,
error.ENOTDIR,
error.IsDir,
error.NotDir,
error.FileNotFound,
=> return null,
else => {
const cached_dir_entry_result = rfs.entries.getOrPut(queue_top.unsafe_path) catch unreachable;
// If we don't properly cache not found, then we repeatedly attempt to open the same directories, which causes a perf trace that looks like this stupidity;
//
// openat(dfd: CWD, filename: "node_modules/react", flags: RDONLY|DIRECTORY) = -1 ENOENT (No such file or directory)
// openat(dfd: CWD, filename: "node_modules/react/", flags: RDONLY|CLOEXEC|DIRECTORY) = -1 ENOENT (No such file or directory)
// openat(dfd: CWD, filename: "node_modules/react", flags: RDONLY|CLOEXEC|DIRECTORY) = -1 ENOENT (No such file or directory)
// openat(dfd: CWD, filename: "node_modules/react", flags: RDONLY|CLOEXEC|DIRECTORY) = -1 ENOENT (No such file or directory)
//
r.dir_cache.markNotFound(queue_top.result);
rfs.entries.markNotFound(cached_dir_entry_result);
switch (@as(anyerror, err)) {
error.ENOENT, error.FileNotFound => {},
else => {
if (comptime enable_logging) {
const pretty = queue_top.unsafe_path;
if (comptime enable_logging) {
const pretty = queue_top.unsafe_path;
r.log.addErrorFmt(
null,
logger.Loc{},
r.allocator,
"Cannot read directory \"{s}\": {s}",
.{
pretty,
@errorName(err),
},
) catch {};
}
},
r.log.addErrorFmt(
null,
logger.Loc{},
r.allocator,
"Cannot read directory \"{s}\": {s}",
.{
pretty,
@errorName(err),
},
) catch {};
}
return null;
},
};

View File

@@ -16,10 +16,9 @@ Note that compiling Bun may take up to 2.5 minutes. It is slow!
## Testing style
Use `bun:test` with files that end in `*.test.{ts,js,jsx,tsx,mjs,cjs}`. If it's a test/js/node/test/{parallel,sequential}/\*.js without a .test.extension, use `bun bd <file>` instead of `bun bd test <file>` since those expect exit code 0 and don't use bun's test runner.
Use `bun:test` with files that end in `*.test.ts`.
- **Do not write flaky tests**. Unless explicitly asked, **never wait for time to pass in tests**. Always wait for the condition to be met instead of waiting for an arbitrary amount of time. **Never use hardcoded port numbers**. Always use `port: 0` to get a random port.
- **Prefer concurrent tests over sequential tests**: When multiple tests in the same file spawn processes or write files, make them concurrent with `test.concurrent` or `describe.concurrent` unless it's very difficult to make them concurrent.
**Do not write flaky tests**. Unless explicitly asked, **never wait for time to pass in tests**. Always wait for the condition to be met instead of waiting for an arbitrary amount of time. **Never use hardcoded port numbers**. Always use `port: 0` to get a random port.
### Spawning processes
@@ -116,7 +115,7 @@ To create a repetitive string, use `Buffer.alloc(count, fill).toString()` instea
### Test Organization
- Use `describe` blocks for grouping related tests
- Regression tests for specific issues go in `/test/regression/issue/${issueNumber}.test.ts`. If there's no issue number, do not put them in the regression directory.
- Regression tests go in `/test/regression/issue/` with issue number
- Unit tests for specific features are organized by module (e.g., `/test/js/bun/`, `/test/js/node/`)
- Integration tests are in `/test/integration/`

View File

@@ -1,145 +0,0 @@
import { expect, test } from "bun:test";
test("request.cookies.set() should set websocket upgrade response cookie - issue #23474", async () => {
using server = Bun.serve({
port: 0,
routes: {
"/ws": req => {
// Set a cookie before upgrading
req.cookies.set("test", "123", {
httpOnly: true,
path: "/",
});
const upgraded = server.upgrade(req);
if (upgraded) {
return undefined;
}
return new Response("Upgrade failed", { status: 500 });
},
},
websocket: {
message(ws, message) {
ws.close();
},
},
});
const { promise, resolve, reject } = Promise.withResolvers();
// Use Bun.connect to send a WebSocket upgrade request and check response headers
const socket = await Bun.connect({
hostname: "localhost",
port: server.port,
socket: {
data(socket, data) {
try {
const response = new TextDecoder().decode(data);
// Check that we got a successful upgrade response
expect(response).toContain("HTTP/1.1 101");
expect(response).toContain("Upgrade: websocket");
// The critical check: Set-Cookie header should be present
expect(response).toContain("Set-Cookie:");
expect(response).toContain("test=123");
socket.end();
resolve();
} catch (err) {
reject(err);
}
},
error(socket, error) {
reject(error);
},
},
});
// Send a valid WebSocket upgrade request
socket.write(
"GET /ws HTTP/1.1\r\n" +
`Host: localhost:${server.port}\r\n` +
"Upgrade: websocket\r\n" +
"Connection: Upgrade\r\n" +
"Sec-WebSocket-Key: dGhlIHNhbXBsZSBub25jZQ==\r\n" +
"Sec-WebSocket-Version: 13\r\n" +
"\r\n",
);
await promise;
});
test("request.cookies.set() should work with custom headers in upgrade - issue #23474", async () => {
using server = Bun.serve({
port: 0,
routes: {
"/ws": req => {
// Set cookies before upgrading
req.cookies.set("session", "abc123", { path: "/" });
req.cookies.set("user", "john", { httpOnly: true });
const upgraded = server.upgrade(req, {
headers: {
"X-Custom-Header": "test",
},
});
if (upgraded) {
return undefined;
}
return new Response("Upgrade failed", { status: 500 });
},
},
websocket: {
message(ws, message) {
ws.close();
},
},
});
const { promise, resolve, reject } = Promise.withResolvers();
const socket = await Bun.connect({
hostname: "localhost",
port: server.port,
socket: {
data(socket, data) {
try {
const response = new TextDecoder().decode(data);
// Check that we got a successful upgrade response
expect(response).toContain("HTTP/1.1 101");
expect(response).toContain("Upgrade: websocket");
// Check custom header
expect(response).toContain("X-Custom-Header: test");
// Check that both cookies are present
expect(response).toContain("Set-Cookie:");
expect(response).toContain("session=abc123");
expect(response).toContain("user=john");
socket.end();
resolve();
} catch (err) {
reject(err);
}
},
error(socket, error) {
reject(error);
},
},
});
socket.write(
"GET /ws HTTP/1.1\r\n" +
`Host: localhost:${server.port}\r\n` +
"Upgrade: websocket\r\n" +
"Connection: Upgrade\r\n" +
"Sec-WebSocket-Key: dGhlIHNhbXBsZSBub25jZQ==\r\n" +
"Sec-WebSocket-Version: 13\r\n" +
"\r\n",
);
await promise;
});

View File

@@ -0,0 +1,44 @@
import { expect, test } from "bun:test";
import { bunEnv, bunExe, tempDir } from "harness";
test("workspace name validation should use path as cache key, not name", async () => {
// This reproduces the issue where multiple workspaces with the same name
// incorrectly report duplicate workspace name errors
using dir = tempDir("workspace-name-cache", {
"package.json": JSON.stringify({
name: "root",
workspaces: ["apps/*"],
}),
"apps/1000/package.json": JSON.stringify({
name: "1000",
version: "1.0.0",
}),
"apps/3000/package.json": JSON.stringify({
name: "1000",
version: "1.0.0",
}),
"apps/5000/package.json": JSON.stringify({
name: "1000",
version: "1.0.0",
}),
"apps/10000/package.json": JSON.stringify({
name: "1000",
version: "1.0.0",
}),
});
await using proc = Bun.spawn({
cmd: [bunExe(), "install"],
env: bunEnv,
cwd: String(dir),
stderr: "pipe",
stdout: "pipe",
});
const [stderr, exitCode] = await Promise.all([proc.stderr.text(), proc.exited]);
// The install should succeed - having the same name in different workspace paths is valid
// (though not a good practice, it shouldn't error)
expect(exitCode).toBe(0);
expect(stderr).not.toContain('Workspace name "1000" already exists');
});