
added @alexpyattaev clean rust bench version

Rene Schallner 2023-08-22 14:22:21 +02:00
parent 02b92d1f5c
commit 8b82cae541
5 changed files with 166 additions and 0 deletions

wrk/rust/clean/.gitignore (new file)

@@ -0,0 +1,14 @@
# Generated by Cargo
# will have compiled files and executables
debug/
target/

# Remove Cargo.lock from gitignore if creating an executable, leave it for libraries
# More information here https://doc.rust-lang.org/cargo/guide/cargo-toml-vs-cargo-lock.html
Cargo.lock

# These are backup files generated by rustfmt
**/*.rs.bk

# MSVC Windows builds of rustc generate these, which store debugging information
*.pdb

wrk/rust/clean/Cargo.toml (new file)

@@ -0,0 +1,9 @@
[package]
name = "hello"
version = "0.1.0"
edition = "2021"

# See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html

[dependencies]
# crossbeam = { version = "0.8.2", features = ["crossbeam-channel"] }

@@ -0,0 +1 @@
Hello from RUST!

wrk/rust/clean/src/lib.rs (new file)

@@ -0,0 +1,101 @@
// Crossbeam should make this faster, but in practice it does not.
//use crossbeam::channel::bounded;
use std::{net::TcpStream, sync::mpsc, thread};

type Job = (fn(TcpStream), TcpStream);

type Sender = mpsc::Sender<Job>;
//type Sender = crossbeam::channel::Sender<Job>;
type Receiver = mpsc::Receiver<Job>;
//type Receiver = crossbeam::channel::Receiver<Job>;

pub struct ThreadPool {
    workers: Vec<Worker>,
    senders: Vec<Sender>,
    next_sender: usize,
}

impl ThreadPool {
    /// Create a new ThreadPool.
    ///
    /// The size is the number of threads in the pool.
    ///
    /// # Panics
    ///
    /// The `new` function will panic if the size is zero.
    pub fn new(size: usize) -> ThreadPool {
        assert!(size > 0);

        let mut workers = Vec::with_capacity(size);
        let mut senders = Vec::with_capacity(size);

        for id in 0..size {
            //let (sender, receiver) = bounded(2);
            let (sender, receiver) = mpsc::channel();
            senders.push(sender);
            workers.push(Worker::new(id, receiver));
        }

        ThreadPool {
            workers,
            senders,
            next_sender: 0,
        }
    }

    /// Round-robin over the available workers so that requests never have to
    /// be buffered.
    pub fn execute(&mut self, handler: fn(TcpStream), stream: TcpStream) {
        let job = (handler, stream);
        self.senders[self.next_sender].send(job).unwrap();
        //self.senders[self.next_sender].try_send(job).unwrap();
        self.next_sender += 1;
        if self.next_sender == self.senders.len() {
            self.next_sender = 0;
        }
    }
}

impl Drop for ThreadPool {
    fn drop(&mut self) {
        // Dropping the senders closes the channels, so each worker's recv()
        // fails and its loop exits.
        self.senders.clear();

        for worker in &mut self.workers {
            println!("Shutting down worker {}", worker.id);

            if let Some(thread) = worker.thread.take() {
                thread.join().unwrap();
            }
        }
    }
}

struct Worker {
    id: usize,
    thread: Option<thread::JoinHandle<()>>,
}

impl Worker {
    fn new(id: usize, receiver: Receiver) -> Worker {
        let thread = thread::spawn(move || Self::work(receiver));
        Worker {
            id,
            thread: Some(thread),
        }
    }

    fn work(receiver: Receiver) {
        loop {
            let message = receiver.recv();

            match message {
                Ok((handler, stream)) => {
                    // println!("Worker got a job; executing.");
                    handler(stream);
                }
                Err(_) => {
                    // println!("Worker disconnected; shutting down.");
                    break;
                }
            }
        }
    }
}
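
For comparison, the pool above deviates from the better-known shared-queue design in the Rust book, where all workers pull jobs from one channel behind an Arc<Mutex<..>> and every recv() contends on the same lock; giving each worker its own channel and round-robining sends avoids that lock entirely. A minimal sketch of the shared-queue alternative, hypothetical and not part of this commit (type and struct names are illustrative; worker join/shutdown omitted for brevity):

// Hypothetical book-style pool for comparison (not part of this commit):
// ONE channel is shared by all workers, so each recv() must take a mutex.
use std::sync::{mpsc, Arc, Mutex};
use std::thread;

type Job = Box<dyn FnOnce() + Send + 'static>;

pub struct SharedQueuePool {
    sender: mpsc::Sender<Job>,
}

impl SharedQueuePool {
    pub fn new(size: usize) -> SharedQueuePool {
        let (sender, receiver) = mpsc::channel::<Job>();
        let receiver = Arc::new(Mutex::new(receiver));

        for _ in 0..size {
            let receiver = Arc::clone(&receiver);
            thread::spawn(move || loop {
                // Lock the shared receiver, pull one job, release the lock
                // (the guard is a temporary of the match), then run the job.
                let job = match receiver.lock().unwrap().recv() {
                    Ok(job) => job,
                    Err(_) => break, // channel closed: shut down
                };
                job();
            });
        }

        SharedQueuePool { sender }
    }

    pub fn execute<F: FnOnce() + Send + 'static>(&self, f: F) {
        self.sender.send(Box::new(f)).unwrap();
    }
}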

wrk/rust/clean/src/main.rs (new file)

@@ -0,0 +1,41 @@
use hello::ThreadPool;
use std::io::prelude::*;
use std::net::TcpListener;
use std::net::TcpStream;

fn main() {
    let listener = TcpListener::bind("127.0.0.1:7878").unwrap();
    // Create a large number of threads so that one is always ready to go.
    let mut pool = ThreadPool::new(128);

    // for stream in listener.incoming().take(2) {
    for stream in listener.incoming() {
        let stream = stream.unwrap();

        //handle_connection(stream);
        pool.execute(handle_connection, stream);
    }

    println!("Shutting down.");
}

fn handle_connection(mut stream: TcpStream) {
    stream.set_nodelay(true).expect("set_nodelay call failed");
    let mut buffer = [0; 1024];
    let nbytes = stream.read(&mut buffer).unwrap();
    if nbytes == 0 {
        return;
    }

    let status_line = "HTTP/1.1 200 OK";
    let contents = "HELLO from RUST!";

    let response = format!(
        "{}\r\nContent-Length: {}\r\n\r\n{}",
        status_line,
        contents.len(),
        contents
    );

    stream.write_all(response.as_bytes()).unwrap();
}
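
Given the wrk/ directory, the intended driver is presumably wrk itself, pointed at http://127.0.0.1:7878 after a release build. Not part of the commit, but handy for a quick sanity check outside of wrk: a minimal std-only client sketch (hypothetical; it would live in a separate binary so its main does not clash with the server's) that sends one request to the hard-coded address and prints the raw response.

// Hypothetical smoke-test client (not part of this commit): connects to the
// benchmark server above, sends a minimal HTTP/1.1 request, and prints the
// raw response.
use std::io::{Read, Write};
use std::net::TcpStream;

fn main() -> std::io::Result<()> {
    let mut stream = TcpStream::connect("127.0.0.1:7878")?;
    stream.write_all(b"GET / HTTP/1.1\r\nHost: 127.0.0.1\r\n\r\n")?;

    // The server writes one response per accepted connection, so a single
    // read of the 1 KiB buffer is enough for a smoke test.
    let mut buf = [0u8; 1024];
    let n = stream.read(&mut buf)?;
    print!("{}", String::from_utf8_lossy(&buf[..n]));
    Ok(())
}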