fix: correct logic bugs in libarchive, s3 credentials, and postgres bindings (#25905)

## Summary

- **libarchive.zig:110**: Fix self-assignment bug where `this.pos` was
assigned to itself instead of `new_pos`
- **s3/credentials.zig:165,176,199**: Fix impossible range checks -
`and` should be `or` for pageSize, partSize, and retry validation (a
value cannot be both less than MIN and greater than MAX simultaneously)
- **postgres.zig:14**: Fix copy-paste error where createConnection
function was internally named "createQuery"

## Test plan

- [ ] Verify S3 credential validation now properly rejects out-of-range
values for pageSize, partSize, and retry
- [ ] Verify libarchive seek operations work correctly
- [ ] Verify postgres createConnection function has correct internal
name

🤖 Generated with [Claude Code](https://claude.com/claude-code)

---------

Co-authored-by: Claude <noreply@anthropic.com>
Co-authored-by: Jarred Sumner <jarred@jarredsumner.com>
Co-authored-by: Claude Bot <claude-bot@bun.sh>
This commit is contained in: Dylan Conway, 2026-01-08 19:46:06 -08:00, committed by GitHub.
Parent commit: 3842a5ee18
Commit: 596e83c918
4 changed files with 6 additions and 6 deletions

View File

@@ -107,7 +107,7 @@ pub const BufferReadStream = struct {
const proposed = pos + offset;
const new_pos = @min(@max(proposed, 0), buflen - 1);
this.pos = @as(usize, @intCast(this.pos));
this.pos = @as(usize, @intCast(new_pos));
return new_pos - pos;
}

View File

@@ -162,7 +162,7 @@ pub const S3Credentials = struct {
}
if (try opts.getOptional(globalObject, "pageSize", i64)) |pageSize| {
if (pageSize < MultiPartUploadOptions.MIN_SINGLE_UPLOAD_SIZE and pageSize > MultiPartUploadOptions.MAX_SINGLE_UPLOAD_SIZE) {
if (pageSize < MultiPartUploadOptions.MIN_SINGLE_UPLOAD_SIZE or pageSize > MultiPartUploadOptions.MAX_SINGLE_UPLOAD_SIZE) {
return globalObject.throwRangeError(pageSize, .{
.min = @intCast(MultiPartUploadOptions.MIN_SINGLE_UPLOAD_SIZE),
.max = @intCast(MultiPartUploadOptions.MAX_SINGLE_UPLOAD_SIZE),
@@ -173,7 +173,7 @@ pub const S3Credentials = struct {
}
}
if (try opts.getOptional(globalObject, "partSize", i64)) |partSize| {
if (partSize < MultiPartUploadOptions.MIN_SINGLE_UPLOAD_SIZE and partSize > MultiPartUploadOptions.MAX_SINGLE_UPLOAD_SIZE) {
if (partSize < MultiPartUploadOptions.MIN_SINGLE_UPLOAD_SIZE or partSize > MultiPartUploadOptions.MAX_SINGLE_UPLOAD_SIZE) {
return globalObject.throwRangeError(partSize, .{
.min = @intCast(MultiPartUploadOptions.MIN_SINGLE_UPLOAD_SIZE),
.max = @intCast(MultiPartUploadOptions.MAX_SINGLE_UPLOAD_SIZE),
@@ -196,7 +196,7 @@ pub const S3Credentials = struct {
}
if (try opts.getOptional(globalObject, "retry", i32)) |retry| {
if (retry < 0 and retry > 255) {
if (retry < 0 or retry > 255) {
return globalObject.throwRangeError(retry, .{
.min = 0,
.max = 255,

View File

@@ -11,7 +11,7 @@ pub fn createBinding(globalObject: *jsc.JSGlobalObject) JSValue {
binding.put(
globalObject,
ZigString.static("createConnection"),
jsc.JSFunction.create(globalObject, "createQuery", PostgresSQLConnection.call, 2, .{}),
jsc.JSFunction.create(globalObject, "createConnection", PostgresSQLConnection.call, 2, .{}),
);
return binding;

View File

@@ -243,7 +243,7 @@ describe("s3 - Storage class", () => {
const writer = s3.file("file_from_writer").writer({
storageClass,
queueSize: 10,
partSize: 5 * 1024,
partSize: 5 * 1024 * 1024, // 5MB minimum
});
const bigFile = Buffer.alloc(10 * 1024 * 1024);