cleanup init, add len param to queue constructor, add stats
bootreer committed Feb 26, 2024
1 parent dd94e22 commit 2cb6b35
Showing 5 changed files with 119 additions and 121 deletions.
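
For orientation, a minimal sketch of how a caller drives the crate after this commit, condensed from the examples/init.rs changes below (the PCI address, LBA, and buffer contents are placeholders): vroom::init now returns the NvmeDevice instead of running its own benchmark loop, the I/O queue pair is created inside init with QUEUE_LENGTH entries, and the submission/completion counters are exposed through the now-public stats field.

fn main() -> Result<(), Box<dyn std::error::Error>> {
    // init() identifies the controller and creates one I/O queue pair
    // of QUEUE_LENGTH entries before returning the device.
    let mut nvme = vroom::init("0000:00:07.0")?;

    // Stage one 512-byte block in the device's DMA buffer, write it to
    // LBA 0, then read it back from namespace 1 (mirrors examples/init.rs).
    let data = vec![0xabu8; 512];
    unsafe { (*nvme.buffer.virt)[..data.len()].copy_from_slice(&data) };
    nvme.write_raw(&data, 0)?;

    let mut buf = vec![0u8; 512];
    nvme.read(1, &mut buf[..], 0)?;

    // Per-device counters added to NvmeStats in this commit.
    println!("submissions: {}", nvme.stats.submissions);
    println!("completions: {}", nvme.stats.completions);
    Ok(())
}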
65 changes: 63 additions & 2 deletions examples/init.rs
@@ -1,7 +1,7 @@
use std::env;
use std::process;

pub fn main() {
pub fn main() -> Result<(), Box<dyn std::error::Error>> {
let mut args = env::args();
args.next();

@@ -12,5 +12,66 @@ pub fn main() {
process::exit(1);
}
};
let _dev = vroom::init(&pci_addr).unwrap();
let mut nvme = vroom::init(&pci_addr)?;

// Testing stuff
let n = 10;
let n2 = 100_000;
let blocks = 8;

let mut read = std::time::Duration::new(0, 0);
let mut write = std::time::Duration::new(0, 0);
let mut read_buf = vec![0; blocks * 512];

// let mut write_batched = std::time::Duration::new(0, 0);
// let mut read_batched = std::time::Duration::new(0, 0);
// let mut read_bbuf = vec![0; blocks * 512];
let mut rng = rand::thread_rng();
use rand::seq::SliceRandom;

let mut seq: Vec<u64> = Vec::from_iter(0..n);

for _ in 0..n2 {
seq.shuffle(&mut rng);
let lba = 0;
for i in &seq {
let rand_block = &(0..(512 * blocks))
.map(|_| rand::random::<u8>())
.collect::<Vec<_>>()[..];
unsafe { (*nvme.buffer.virt)[..rand_block.len()].copy_from_slice(rand_block) };

// write
let before = std::time::Instant::now();
nvme.write_raw(rand_block, lba + (*i * blocks as u64))?;
write += before.elapsed();

// let before = Instant::now();
// nvme.batched_write(1, rand_block, lba, 256)?;
// write_batched += before.elapsed();

// read
// let before = Instant::now();
// nvme.batched_read(1, &mut read_bbuf[..], lba, 256)?;
// read_batched += before.elapsed();

let before = std::time::Instant::now();
nvme.read(1, &mut read_buf[..], lba + (*i * blocks as u64))?;
read += before.elapsed();

// assert_eq!(read_buf, rand_block);
// assert_eq!(read_buf, read_bbuf);

// lba += blocks as u64;
}
}

println!("total completions: {}", nvme.stats.completions);
println!("total submissions: {}", nvme.stats.submissions);
println!(
"read time: {:?}; write time: {:?}; total: {:?}",
read,
write,
read + write
);
Ok(())
}
1 change: 0 additions & 1 deletion rust-toolchain

This file was deleted.

68 changes: 5 additions & 63 deletions src/lib.rs
@@ -9,12 +9,10 @@ mod pci;
#[allow(dead_code)]
mod queues;

use self::pci::*;
use pci::*;
use queues::QUEUE_LENGTH;
use nvme::NvmeDevice;
use std::error::Error;
use std::time::Instant;
// use std::io::Read;
// use std::fs::File;

#[cfg(target_arch = "aarch64")]
#[inline(always)]
@@ -40,7 +38,7 @@ pub(crate) fn pause() {
}
}

pub fn init(pci_addr: &str) -> Result<(), Box<dyn Error>> {
pub fn init(pci_addr: &str) -> Result<NvmeDevice, Box<dyn Error>> {
let mut vendor_file = pci_open_resource_ro(pci_addr, "vendor").expect("wrong pci address");
let mut device_file = pci_open_resource_ro(pci_addr, "device").expect("wrong pci address");
let mut config_file = pci_open_resource_ro(pci_addr, "config").expect("wrong pci address");
@@ -56,68 +54,14 @@ pub fn init(pci_addr: &str) -> Result<(), Box<dyn Error>> {
}

let mut nvme = NvmeDevice::init(pci_addr)?;

nvme.identify_controller()?;
nvme.create_io_queue_pair()?;
nvme.create_io_queue_pair(QUEUE_LENGTH)?;
let ns = nvme.identify_namespace_list(0);

for n in ns {
println!("ns_id: {n}");
nvme.identify_namespace(n);
}

// Testing stuff
let n = 100;
let n2 = 100;
let blocks = 1024 * 4;

let mut read = std::time::Duration::new(0, 0);
let mut read_batched = std::time::Duration::new(0, 0);
let mut _write = std::time::Duration::new(0, 0);
let mut write_batched = std::time::Duration::new(0, 0);

let mut read_buf = vec![0; blocks * 512];
let mut read_bbuf = vec![0; blocks * 512];

for _ in 0..n2 {
let mut lba = 0;
for _ in 0..n {
let rand_block = &(0..(512 * blocks))
.map(|_| rand::random::<u8>())
.collect::<Vec<_>>()[..];
unsafe { (*nvme.buffer.virt)[..].copy_from_slice(rand_block) };

// write
// let before = Instant::now();
// nvme.write_raw(rand_block, lba)?;
// write += before.elapsed();

let before = Instant::now();
nvme.batched_write(1, rand_block, lba, 256)?;
write_batched += before.elapsed();

// read
let before = Instant::now();
nvme.batched_read(1, &mut read_bbuf[..], lba, 256)?;
read_batched += before.elapsed();

let before = Instant::now();
nvme.read(1, &mut read_buf[..], lba)?;
read += before.elapsed();
// assert_eq!(read_buf, rand_block);
// assert_eq!(read_buf, read_bbuf);

lba += blocks as u64;
// nvme.read(1, 4);
}
}
let n = n * n2;

// println!("{blocks} block write: {:?}", write / n);
println!("{blocks} block batched write: {:?}", write_batched / n);
println!("{blocks} block read: {:?}", read / n);
println!("{blocks} block batched read: {:?}", read_batched / n);
Ok(())
Ok(nvme)
}

#[derive(Debug, Clone, Copy)]
@@ -131,6 +75,4 @@ pub struct NvmeNamespace {
pub struct NvmeStats {
pub completions: u64,
pub submissions: u64,
pub reads: u64,
pub writes: u64,
}
81 changes: 40 additions & 41 deletions src/nvme.rs
@@ -109,7 +109,7 @@ pub struct NvmeDevice {
pub buffer: Dma<[u8; 2048 * 1024]>, // 2MiB of buffer
prp_list: Dma<[u64; 512]>, // Addresses of PRPs; the device doesn't necessarily support 2MiB page sizes; 8 bytes * 512 = 4096
namespaces: HashMap<u32, NvmeNamespace>,
stats: NvmeStats,
pub stats: NvmeStats,
}

#[allow(unused)]
@@ -128,8 +128,8 @@ impl NvmeDevice {
}
},
len,
admin_sq: NvmeSubQueue::new()?,
admin_cq: NvmeCompQueue::new()?,
admin_sq: NvmeSubQueue::new(QUEUE_LENGTH)?,
admin_cq: NvmeCompQueue::new(QUEUE_LENGTH)?,
sub_queues: vec![],
comp_queues: vec![],
buffer: Dma::allocate(crate::memory::HUGE_PAGE_SIZE, true)?,
@@ -142,7 +142,6 @@ impl NvmeDevice {
unsafe {
(*dev.prp_list.virt)[i - 1] = (dev.buffer.phys + i * 4096) as u64;
}
// println!("buffer phys 0x{:x}", dev.buffer.phys + i * 4096);
}

println!("CAP: 0x{:x}", dev.get_reg64(NvmeRegs64::CAP as u64));
@@ -197,9 +196,7 @@ impl NvmeDevice {
loop {
let csts = dev.get_reg32(NvmeRegs32::CSTS as u32);
if csts & 1 == 0 {
unsafe {
super::pause();
}
super::pause();
} else {
break;
}
@@ -248,16 +245,16 @@ impl NvmeDevice {
Ok(())
}

pub fn create_io_queue_pair(&mut self) -> Result<(), Box<dyn Error>> {
pub fn create_io_queue_pair(&mut self, len: usize) -> Result<(), Box<dyn Error>> {
println!("Requesting i/o completion queue");
let cq_id = self.comp_queues.len() + 1;
let queue = NvmeCompQueue::new()?;
let queue = NvmeCompQueue::new(len)?;
let comp = self.submit_and_complete_admin(|c_id, _| {
NvmeCommand::create_io_completion_queue(
c_id,
cq_id as u16,
queue.get_addr(),
(QUEUE_LENGTH - 1) as u16,
(len - 1) as u16,
)
});
let status = comp.status >> 1;
@@ -268,20 +265,20 @@ impl NvmeDevice {
status & 0xFF,
(status >> 8) & 0x7
);
println!("something went awry: 0x{:x}", status)
return Err("Requesting i/o completion queue failed".into());
}

self.comp_queues.push(queue);

println!("Requesting i/o submission queue");
let queue = NvmeSubQueue::new()?;
let queue = NvmeSubQueue::new(len)?;
let q_id = self.sub_queues.len() + 1;
let comp = self.submit_and_complete_admin(|c_id, _| {
NvmeCommand::create_io_submission_queue(
c_id,
q_id as u16,
queue.get_addr(),
(QUEUE_LENGTH - 1) as u16,
(len - 1) as u16,
cq_id as u16,
)
});
@@ -293,7 +290,7 @@ impl NvmeDevice {
status & 0xFF,
(status >> 8) & 0x7
);
println!("something went awry: 0x{:x}", status)
return Err("Requesting i/o submission queue failed".into());
}

self.sub_queues.push(queue);
@@ -349,6 +346,27 @@ impl NvmeDevice {
namespace
}

pub fn write_string(&mut self, data: String, lba: u64) -> Result<(), Box<dyn Error>> {
self.write_raw(data.as_bytes(), lba)
}

pub fn write_raw(&mut self, data: &[u8], mut lba: u64) -> Result<(), Box<dyn Error>> {
let ns = *self.namespaces.get(&1).unwrap();
// println!("data len: {}", data.len());

// for chunk in data.chunks(HUGE_PAGE_SIZE) {
for chunk in data.chunks(128 * 4096) {
// unsafe {
// (*self.buffer.virt)[..chunk.len()].copy_from_slice(chunk);
// }
let blocks = (chunk.len() + ns.block_size as usize - 1) / ns.block_size as usize;
self.namespace_io(&ns, blocks as u64, lba, true)?;
lba += blocks as u64;
}

Ok(())
}

pub fn read(
&mut self,
ns_id: u32,
@@ -364,14 +382,9 @@ impl NvmeDevice {
// chunk.copy_from_slice(&unsafe { (*self.buffer.virt) }[..chunk.len()]);
lba += blocks as u64;
}
// self.read_lba(ns_id, blocks, lba)
Ok(())
}

pub fn write_string(&mut self, data: String, lba: u64) -> Result<(), Box<dyn Error>> {
self.write_raw(data.as_bytes(), lba)
}

pub fn submit_io(
&mut self,
ns: &NvmeNamespace,
@@ -423,6 +436,7 @@ impl NvmeDevice {
eprintln!("{:?}", c_entry);
return None;
}
self.stats.completions += 1;
Some(c_entry.sq_head)
}

@@ -439,7 +453,7 @@

for chunk in data.chunks(HUGE_PAGE_SIZE) {
// unsafe { (*self.buffer.virt)[..chunk.len()].copy_from_slice(chunk) }
let mut tail = self.sub_queues[q_id - 1].tail;
let tail = self.sub_queues[q_id - 1].tail;

let batch_len = std::cmp::min(batch_len, chunk.len() as u64 / block_size);
let batch_size = chunk.len() as u64 / batch_len;
@@ -453,9 +467,8 @@ impl NvmeDevice {
lba,
true,
) {
self.stats.submissions += 1;
self.write_reg_idx(NvmeArrayRegs::SQyTDBL, q_id as u16, tail as u32);
// tail = new_tail;
// self.write_reg_idx(NvmeArrayRegs::SQyTDBL, q_id as u16, tail as u32);
} else {
eprintln!("tail: {tail}, batch_len: {batch_len}, batch_size: {batch_size}, blocks: {blocks}");
}
@@ -479,21 +492,21 @@ impl NvmeDevice {
let q_id = 1;

for chunk in data.chunks_mut(HUGE_PAGE_SIZE) {
let mut tail = self.sub_queues[q_id - 1].tail;
let tail = self.sub_queues[q_id - 1].tail;

let batch_len = std::cmp::min(batch_len, chunk.len() as u64 / block_size);
let batch_size = chunk.len() as u64 / batch_len;
let blocks = batch_size / ns.block_size;

for i in 0..batch_len {
if let Some(new_tail) = self.submit_io(
if let Some(tail) = self.submit_io(
&ns,
self.buffer.phys as u64 + i * batch_size,
blocks,
lba,
false,
) {
tail = new_tail;
self.stats.submissions += 1;
self.write_reg_idx(NvmeArrayRegs::SQyTDBL, q_id as u16, tail as u32);
} else {
eprintln!("tail: {tail}, batch_len: {batch_len}, batch_size: {batch_size}, blocks: {blocks}");
@@ -549,27 +562,13 @@ impl NvmeDevice {
};

let tail = io_q.submit(entry);
self.stats.submissions += 1;

self.write_reg_idx(NvmeArrayRegs::SQyTDBL, q_id as u16, tail as u32);
self.sub_queues[q_id - 1].head = self.complete_io(1).unwrap() as usize;
Ok(())
}

pub fn write_raw(&mut self, data: &[u8], mut lba: u64) -> Result<(), Box<dyn Error>> {
let ns = *self.namespaces.get(&1).unwrap();
// println!("data len: {}", data.len());

// for chunk in data.chunks(HUGE_PAGE_SIZE) {
for chunk in data.chunks(128 * 4096) {
// unsafe {
// (*self.buffer.virt)[..chunk.len()].copy_from_slice(chunk);
// }
let blocks = (chunk.len() + ns.block_size as usize - 1) / ns.block_size as usize;
self.namespace_io(&ns, blocks as u64, lba, true)?;
lba += blocks as u64;
}

Ok(())
}

pub fn submit_and_complete_admin<F: FnOnce(u16, usize) -> NvmeCommand>(
&mut self,