more benchs

This commit is contained in:
Ciro Spaciari
2025-08-28 11:56:25 -07:00
parent 305df47a2c
commit 5dd9ccb50e
12 changed files with 4020 additions and 3 deletions

View File

@@ -0,0 +1,8 @@
module bench
go 1.25.0
require (
filippo.io/edwards25519 v1.1.0 // indirect
github.com/go-sql-driver/mysql v1.9.3
)

View File

@@ -0,0 +1,4 @@
filippo.io/edwards25519 v1.1.0 h1:FNf4tywRC1HmFuKW5xopWpigGjJKiJSV0Cqo0cJWDaA=
filippo.io/edwards25519 v1.1.0/go.mod h1:BxyFTGdWcka3PhytdK4V28tE5sGfRvvvRV7EaN4VDT4=
github.com/go-sql-driver/mysql v1.9.3 h1:U/N249h2WzJ3Ukj8SowVFjdtZKfu9vlLZxjPXV1aweo=
github.com/go-sql-driver/mysql v1.9.3/go.mod h1:qn46aNg1333BRMNU69Lq93t8du/dwxI64Gl8i5p1WMU=

View File

@@ -0,0 +1,111 @@
package main
import (
"context"
"database/sql"
"fmt"
"log"
"sync"
"time"
_ "github.com/go-sql-driver/mysql"
)
// UserRow mirrors one row of the users_bun_bench table. The benchmark
// issues `SELECT *`, so the Scan targets below must match the table's
// column order exactly.
type UserRow struct {
	ID        uint64
	FirstName string
	LastName  string
	Email     string
	DOB       time.Time // MySQL DATE -> time.Time (use parseTime=true)
}
// main benchmarks 1,000,000 executions of a 100-row SELECT against MySQL
// through database/sql, with the pool capped at 10 connections to mirror
// the Rust benchmarks in this directory (same host/port, same pool size).
func main() {
	// Rust DSN: mysql://root:bun@localhost:55034/mysql
	// Go DSN (mysql driver): user:pass@tcp(host:port)/db?params
	dsn := "root:bun@tcp(localhost:55034)/mysql?parseTime=true&interpolateParams=true"

	db, err := sql.Open("mysql", dsn)
	if err != nil {
		log.Fatal(err)
	}
	defer db.Close()

	// --- Pool settings (cap at 10, like Rust .max_size(10)) ---
	db.SetMaxOpenConns(10)
	db.SetMaxIdleConns(10)
	db.SetConnMaxLifetime(60 * time.Minute)

	ctx := context.Background()

	// Make sure we can connect before starting the clock.
	if err := db.PingContext(ctx); err != nil {
		log.Fatal(err)
	}

	// Prepare once (Stmt is safe for concurrent use by multiple goroutines).
	stmt, err := db.PrepareContext(ctx, "SELECT * FROM users_bun_bench LIMIT 100")
	if err != nil {
		log.Fatal(err)
	}
	defer stmt.Close()

	// Workload config
	const totalJobs = 1_000_000
	workers := 10 // matches the pool cap; more workers would only queue on the pool

	start := time.Now()

	// Worker pool: a small buffered channel feeds jobs to a fixed set of goroutines.
	jobs := make(chan struct{}, workers)
	var wg sync.WaitGroup

	wg.Add(workers)
	for i := 0; i < workers; i++ {
		go func() {
			defer wg.Done()
			for range jobs {
				if err := runQuery(ctx, stmt); err != nil {
					// Match Rust's "unwrap()" behavior by failing loudly.
					log.Fatal(err)
				}
			}
		}()
	}

	// Enqueue jobs, then close the channel so workers drain and exit.
	for i := 0; i < totalJobs; i++ {
		jobs <- struct{}{}
	}
	close(jobs)

	// Wait for completion.
	wg.Wait()

	fmt.Printf("go-sql: %v\n", time.Since(start))
}

// runQuery executes the prepared SELECT once and scans every row into a
// UserRow, discarding the results to avoid huge memory use. The rows handle
// is always closed via defer — including when Scan fails, which the original
// inline version did not guarantee.
func runQuery(ctx context.Context, stmt *sql.Stmt) error {
	rows, err := stmt.QueryContext(ctx)
	if err != nil {
		return err
	}
	defer rows.Close()

	for rows.Next() {
		var u UserRow
		if err := rows.Scan(&u.ID, &u.FirstName, &u.LastName, &u.Email, &u.DOB); err != nil {
			return err
		}
	}
	return rows.Err()
}
// max returns the larger of a and b.
// NOTE(review): this function is never called, and it shadows the built-in
// max available since Go 1.21 (the module declares go 1.25.0) — consider
// deleting it.
func max(a, b int) int {
	if b > a {
		return b
	}
	return a
}

View File

@@ -1,4 +1,4 @@
const sql = new Bun.SQL("mysql://root:bun@localhost:55002");
const sql = new Bun.SQL("mysql://root:bun@localhost:55034");
// Create the table if it doesn't exist
await sql`CREATE TABLE IF NOT EXISTS users_bun_bench (
@@ -29,7 +29,11 @@ if (+(existingUsers?.[0]?.count ?? existingUsers?.count) < 100) {
console.time("Bun.sql");
let promises = [];
for (let i = 0; i < 1_000_000; i++) {
promises.push(sql`SELECT * FROM users_bun_bench LIMIT 100`);
}
await Promise.all(promises);

29
bench/mysql/mariadb.mjs Normal file
View File

@@ -0,0 +1,29 @@
import mariadb from "mariadb";

// Connection pool mirroring the other benchmarks in this directory:
// same server (localhost:55034), same credentials, capped at 10 connections.
const pool = mariadb.createPool({
  host: "localhost",
  user: "root",
  password: "bun",
  database: "mysql",
  port: 55034,
  connectionLimit: 10,
  acquireTimeout: 600000,
});

// Borrow a connection, run the benchmark SELECT, and always return the
// connection to the pool — even when the query throws.
async function executeQuery() {
  const conn = await pool.getConnection();
  try {
    return await conn.query("SELECT * FROM users_bun_bench LIMIT 100");
  } finally {
    conn.release(); // release to pool
  }
}

// Fire 1M queries concurrently; the pool serializes them onto 10 connections.
console.time("mariadb");
const promises = [];
for (let i = 0; i < 1_000_000; i++) {
  promises.push(executeQuery());
}
await Promise.all(promises);
console.timeEnd("mariadb");
await pool.end();

View File

@@ -4,10 +4,10 @@ const pool = createPool({
user: "root",
password: "bun",
database: "mysql",
port: 55002,
port: 55034,
waitForConnections: true,
connectionLimit: 10,
idleTimeout: 60000, // idle connections timeout, in milliseconds, the default value 60000
idleTimeout: 60000,
queueLimit: 0,
enableKeepAlive: true,
keepAliveInitialDelay: 0,
@@ -49,7 +49,11 @@ if (+(existingUsers?.[0]?.count ?? existingUsers?.count) < 100) {
console.time("mysql2");
let promises = [];
for (let i = 0; i < 1_000_000; i++) {
promises.push(pool.execute(`SELECT * FROM users_bun_bench LIMIT 100`));
}
await Promise.all(promises);

1668
bench/mysql/rust-diesel-async/Cargo.lock generated Normal file

File diff suppressed because it is too large Load Diff

View File

@@ -0,0 +1,11 @@
[package]
name = "rust-mysql"
version = "0.1.0"
edition = "2024"
[dependencies]
diesel = { version = "2", features = ["mysql", "chrono"] }
diesel-async = { version = "0.6.1", features = ["mysql", "tokio", "bb8"] }
tokio = { version = "1", features = ["macros", "rt-multi-thread"] }
futures = "0.3"
chrono = { version = "0.4", features = ["serde"] } # remove `serde` if you don't need it

View File

@@ -0,0 +1,60 @@
use diesel_async::pooled_connection::bb8::{Pool, RunError};
use diesel_async::pooled_connection::AsyncDieselConnectionManager;
use diesel_async::{AsyncMysqlConnection, RunQueryDsl};
use futures::future::join_all;
use diesel::sql_query;
use std::time::Instant;
use diesel::sql_types::*;
use std::time::Duration;
/// One row of `users_bun_bench`, deserialized by column name.
/// `QueryableByName` is required because the benchmark runs a raw
/// `sql_query` rather than going through Diesel's typed DSL; each field
/// carries its SQL type so Diesel can map the result columns.
#[derive(Debug, diesel::QueryableByName)]
struct UserRow {
    #[diesel(sql_type = Unsigned<BigInt>)]
    id: u64,
    #[diesel(sql_type = Varchar)]
    first_name: String,
    #[diesel(sql_type = Varchar)]
    last_name: String,
    #[diesel(sql_type = Varchar)]
    email: String,
    #[diesel(sql_type = Date)]
    dob: chrono::NaiveDate,
}
/// Benchmarks 1,000,000 executions of a 100-row SELECT through
/// diesel-async with a bb8 pool capped at 10 connections.
#[tokio::main(flavor = "multi_thread")]
async fn main() -> Result<(), RunError> {
    // Same server/credentials as the other benchmarks in this directory.
    let config = AsyncDieselConnectionManager::<AsyncMysqlConnection>::new(
        "mysql://root:bun@localhost:55034/mysql",
    );
    // Pool capped at 10; the very large connection timeout keeps the 1M
    // queued tasks from timing out while they wait for a free connection.
    let pool = Pool::builder()
        .connection_timeout(Duration::from_secs(36000))
        .max_size(10)
        .build(config)
        .await?;

    let start = Instant::now();

    // One spawned task per query; all tasks contend for the 10 pooled connections.
    let tasks: Vec<_> = (0..1_000_000usize)
        .map(|_| {
            let pool = pool.clone();
            tokio::spawn(async move {
                let mut conn = pool.get().await.unwrap();
                let rows: Vec<UserRow> = sql_query("SELECT * FROM users_bun_bench LIMIT 100")
                    .get_results(&mut conn)
                    .await
                    .unwrap();
                rows
            })
        })
        .collect();

    // Await every task. A panic inside a task (a failed unwrap on pool.get or
    // on the query) now aborts the benchmark instead of being silently
    // discarded, as `let _ = task.await;` previously did.
    for task in tasks {
        task.await.expect("benchmark task panicked");
    }

    println!("diesel-async: {:.2?}", start.elapsed());
    Ok(())
}

2070
bench/mysql/rust-sqlx/Cargo.lock generated Normal file

File diff suppressed because it is too large Load Diff

View File

@@ -0,0 +1,10 @@
[package]
name = "tokio-mysql-pool"
version = "0.1.0"
edition = "2021"
[dependencies]
tokio = { version = "1", features = ["macros", "rt-multi-thread"] }
sqlx = { version = "0.7", features = ["mysql", "runtime-tokio-rustls"] }
anyhow = "1"
futures = "0.3"

View File

@@ -0,0 +1,38 @@
use futures::future::join_all;
use sqlx::{mysql::MySqlPoolOptions, Row};
use std::time::Instant;

/// Benchmarks 1,000,000 executions of a 100-row SELECT through SQLx
/// with the pool capped at 10 connections.
#[tokio::main(flavor = "multi_thread")]
async fn main() -> Result<(), sqlx::Error> {
    let db_url = "mysql://root:bun@localhost:55034/mysql";

    // Create a Tokio-backed SQLx pool with a hard cap of 10 connections,
    // matching the other benchmarks in this directory.
    let pool = MySqlPoolOptions::new()
        .max_connections(10)
        .connect(db_url) // db_url is already &str; no need for a double reference
        .await?;

    let start = Instant::now();

    // One spawned task per query; all 1M contend for the 10 pooled connections.
    let tasks: Vec<_> = (0..1_000_000usize)
        .map(|_| {
            let pool = pool.clone();
            tokio::spawn(async move {
                sqlx::query("SELECT * FROM users_bun_bench LIMIT 100")
                    .fetch_all(&pool)
                    .await
            })
        })
        .collect();

    // Propagate failures: a panicked task or a query error now aborts the
    // run, where `let _ = task.await;` previously discarded both the
    // JoinError and the sqlx::Error, letting a failing benchmark "pass".
    for task in tasks {
        task.await.expect("benchmark task panicked")?;
    }

    println!("sqlx: {:.2?}", start.elapsed());
    Ok(())
}