chore: Removed rust code from repo

This commit is contained in:
Nicholas Novak 2023-10-28 22:05:45 -07:00
parent c73a555b04
commit 511d494cd9
13 changed files with 0 additions and 2493 deletions

1728
Cargo.lock generated

File diff suppressed because it is too large Load Diff

View File

@ -1,19 +0,0 @@
[package]
name = "spatial-db"
version = "0.1.0"
edition = "2021"
# See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html
[dependencies]
# Columnar in-memory arrays; kept at the same major version as `parquet`
arrow-array = "47.0.0"
# HTTP framework for the built-in storage server
axum = "0.6.20"
# Command-line argument parsing via #[derive(Parser)]
clap = { version = "4.4.5", features = ["derive"] }
# On-disk columnar persistence format
parquet = "47.0.0"
# Random inputs for the benchmark suite
rand = "0.8.5"
# Serialization framework used for chunk persistence
serde = { version = "1.0.189", features = ["derive"] }
# Serde support for arrays longer than 32 elements
serde-big-array = "0.5.1"
# Serde support for const-generic arrays (used on chunk sections/blocks)
serde_arrays = "0.1.0"
# JSON encoding of chunks written to disk
serde_json = "1.0.107"
serde_with = "3.4.0"
# Async runtime for the axum server
tokio = { version = "1.32.0", features = ["macros", "rt-multi-thread"] }

View File

@ -1,33 +0,0 @@
#![feature(test)]
extern crate serde;
#[macro_use]
extern crate serde_big_array;
mod simple_server;
mod storage;
mod storage_server;
use clap::Parser;
#[cfg(test)]
mod tests;
// Command-line arguments for the spatial-db binary.
// NOTE(review): the field's `///` doc comment doubles as clap's --help text,
// so its wording (including the "build-in" typo) is left unchanged here to
// avoid altering runtime output.
#[derive(Parser, Debug)]
#[command(author, version, about, long_about = None)]
struct Args {
    /// `http` starts a build-in http server that listens for reads and writes
    /// to the database
    #[arg(long)]
    http: bool,
}
/// Entry point: parses CLI flags and optionally starts the HTTP server.
fn main() {
    let cli = Args::parse();

    // When `--http` is passed, bring up the built-in HTTP storage server.
    // `storage_server::main` is a #[tokio::main] function, so this call
    // blocks until the server exits.
    if cli.http {
        println!("Proxy was enabled");
        storage_server::main();
    }

    println!("Hello, world!");
}

View File

@ -1 +0,0 @@
pub mod server;

View File

@ -1,79 +0,0 @@
use crate::storage::disk_storage::ChunkStorageCache;
use crate::storage::world::{BlockID, BlockPos, BlockRange, ChunkData, ChunkPos};
use crate::storage_server::StorageServer;
/// Associates a single block id with a whole region of block positions.
///
/// NOTE(review): not constructed anywhere in this file — possibly dead code;
/// confirm before relying on it.
#[derive(Debug)]
struct MultipleBlocks {
    id: BlockID,
    range: BlockRange,
}
/// A minimal storage server that reads and writes block data through an
/// on-disk chunk cache (`ChunkStorageCache`).
#[derive(Debug)]
pub struct SimpleServer {
    // Cache of open chunk-file handles; all chunk reads go through this.
    chunk_storage: ChunkStorageCache,
}
impl SimpleServer {
    /// Creates a server backed by an empty chunk-storage cache.
    pub fn new() -> Self {
        SimpleServer {
            chunk_storage: ChunkStorageCache::new(),
        }
    }

    /// Returns the number of chunks currently managed by the server.
    pub fn num_chunks(&self) -> usize {
        unimplemented!()
    }

    /// Fetches the data of the chunk containing `block_pos`, loading it from
    /// disk through the storage cache.
    ///
    /// NOTE(review): `fetch_chunk_by_pos` creates the chunk file on disk when
    /// it is missing, so a `None` return does not appear reachable — confirm.
    fn chunk_at(&mut self, block_pos: &BlockPos) -> Option<ChunkData> {
        let chunk_pos = ChunkPos::from(block_pos);
        let chunk = self
            .chunk_storage
            .fetch_chunk_by_pos(&chunk_pos)
            .expect("Finding chunk failed");
        Some(chunk)
    }

    /// Ensures a chunk exists at `chunk_pos`, creating its backing file on
    /// disk if necessary.
    fn create_chunk_at(&mut self, chunk_pos: &ChunkPos) {
        self.chunk_storage
            // `chunk_pos` is already a reference; the extra `&` was a
            // needless double borrow.
            .fetch_chunk_by_pos(chunk_pos)
            // Fixed typo in the panic message ("Creatinc" -> "Creating").
            .expect("Creating chunk failed");
    }
}
impl StorageServer for SimpleServer {
    /// Sets the block at `world_position` to `target_state`, creating the
    /// containing chunk on demand.
    ///
    /// NOTE(review): `sections` is indexed with `y % 16`, while
    /// `ChunkSection::index_of_block` also folds y with `rem_euclid(16)` —
    /// blocks at y and y + 16 therefore land in the same slot. The section
    /// index was presumably meant to be `y / 16`; confirm against the chunk
    /// format (the read path in `ChunkData::section_for` uses the same
    /// expression, so reads and writes are at least mutually consistent).
    ///
    /// NOTE(review): `chunk` is an owned copy deserialized from disk and is
    /// never written back, so this update does not appear to persist — verify.
    fn change_block(&mut self, target_state: BlockID, world_position: &BlockPos) {
        let mut chunk = self.chunk_at(world_position);
        // Test if there is a chunk that already exists
        if chunk.is_none() {
            self.create_chunk_at(&ChunkPos::from(world_position));
            chunk = self.chunk_at(world_position);
        }
        let mut chunk = chunk.expect("Could not find chunk");
        // Find the section that the block is located in
        let current_section = &mut chunk.sections[world_position.y % 16];
        // Find the index that the block is at, and update its state
        let chunk_array_index = current_section.index_of_block(&world_position);
        current_section.update_block_at_index(&target_state, chunk_array_index);
    }
    /// Sets every block in the range `start..=end` to `target_stage`.
    /// Not yet implemented.
    fn change_block_range(&mut self, target_stage: BlockID, start: &BlockPos, end: &BlockPos) {
        unimplemented!()
    }
    /// Returns the id of the block stored at `pos`, or `BlockID::Empty` when
    /// the containing chunk does not exist.
    fn read_block_at(&mut self, pos: &BlockPos) -> BlockID {
        let chunk = self.chunk_at(pos);
        if let Some(chunk) = chunk {
            let chunk_section = chunk.section_for(pos);
            return chunk_section.get_block_at_index(pos).clone();
        }
        BlockID::Empty
    }
}

View File

@ -1,2 +0,0 @@

View File

@ -1,128 +0,0 @@
use super::world::{ChunkData, ChunkPos};
use std::cmp::Ordering;
use std::error::Error;
use std::io::{BufReader, ErrorKind, Write};
use std::{collections::HashMap, fs::File, time::Instant};
// Number of chunk-file handles kept open at once; with 1, touching a
// different chunk file evicts the previously cached handle.
const CACHED_CHUNK_FILES: usize = 1;
/// `ChunkStorageCache` caches a list of the most recently used file handles
/// where chunks are stored from, and allows for faster accessing of the data
/// from chunks
#[derive(Debug)]
pub struct ChunkStorageCache {
    // `cached_chunk_files` is a vector of cached file handles that are already open
    cached_chunk_files: [Option<File>; CACHED_CHUNK_FILES],
    // `cached_file_names` is a list of all the filenames that are contained
    // within the cache; maps file name -> slot index into the two arrays
    cached_file_names: HashMap<String, usize>,
    // When each slot was last refreshed; the oldest slot is evicted first.
    last_used_times: [Instant; CACHED_CHUNK_FILES],
}
impl ChunkStorageCache {
    /// Creates an empty cache: no open handles, no name mappings, and all
    /// slot timestamps initialized to "now".
    ///
    /// NOTE(review): `[None; CACHED_CHUNK_FILES]` requires
    /// `Option<File>: Copy`, which `File` does not provide — confirm this
    /// compiles on the toolchain in use (`std::array::from_fn` would
    /// sidestep the Copy requirement).
    pub fn new() -> Self {
        ChunkStorageCache {
            cached_chunk_files: [None; CACHED_CHUNK_FILES],
            cached_file_names: HashMap::new(),
            last_used_times: [Instant::now(); CACHED_CHUNK_FILES],
        }
    }
    /// `load_chunk_file` is called whenever a file is missing in the file cache
    /// and needs to be loaded from disk
    ///
    /// This replaces a slot for another file in the cache, according to the
    /// caching strategy
    ///
    /// NOTE(review): when the file is freshly created, a blank chunk is
    /// written and the handle's cursor is left at end-of-file; the caller's
    /// subsequent `read_to_string` on this handle presumably reads an empty
    /// string — verify whether a rewind (`seek(SeekFrom::Start(0))`) is
    /// needed before returning.
    fn load_chunk_file(&mut self, chunk_pos: &ChunkPos, file_name: &str) -> &File {
        // Open read+write so the same handle can serve later reads and writes.
        let chunk_file = File::options().write(true).read(true).open(file_name);
        let chunk_file = match chunk_file {
            Ok(file) => file,
            Err(err) => match err.kind() {
                ErrorKind::NotFound => {
                    // First access to this chunk: create the file and seed it
                    // with a JSON-encoded blank chunk.
                    let mut new_chunk_file = File::options()
                        .write(true)
                        .read(true)
                        .create(true)
                        .open(file_name)
                        .expect("Opening new chunk file failed");
                    let blank_chunk = ChunkData::new(chunk_pos);
                    let encoded_chunk = serde_json::to_string(&blank_chunk).unwrap();
                    new_chunk_file
                        .write_all(encoded_chunk.as_bytes())
                        .expect("Error writing data to chunk");
                    new_chunk_file
                }
                // Any other I/O failure (permissions, etc.) is fatal here.
                err => panic!("Opening new file for chunk failed with: {:?}", err),
            },
        };
        // Add the newly opened file to the cache
        // Insert the new item to replace the item that was last accessed
        // The minimum time should be the oldest time
        let (last_used_index, _) = self
            .last_used_times
            .iter()
            .enumerate()
            .reduce(
                // Keep whichever slot has the older (smaller) timestamp.
                |(fst_index, fst_time), (snd_index, snd_time)| match fst_time.cmp(&snd_time) {
                    Ordering::Less => (fst_index, fst_time),
                    Ordering::Equal | Ordering::Greater => (snd_index, snd_time),
                },
            )
            .expect("There should always be a last used index");
        // Next, we have to:
        // * Remove the old filename and index mapping from the names
        // * Replace the last used time with the current time
        // * Replace the open file with the current one
        if !self.cached_file_names.is_empty() {
            // Find the name of the previous entry
            let (previous_file_name, _) = self
                .cached_file_names
                .iter()
                .find(|(_, &array_index)| array_index == last_used_index)
                .expect("The last used index should always have a name");
            self.cached_file_names.remove(&previous_file_name.clone());
        }
        self.cached_file_names
            .insert(file_name.to_string(), last_used_index);
        // Replace the timestamp with the new timestamp
        self.last_used_times[last_used_index] = Instant::now();
        self.cached_chunk_files[last_used_index] = Some(chunk_file);
        self.cached_chunk_files[last_used_index].as_ref().unwrap()
    }
    /// `fetch_chunk_by_pos` takes in the position of a chunk, and returns the
    /// data of the chunk from disk
    ///
    /// This operation is cached, if possible, so that subsequent accesses to
    /// the same chunk are handled by the same file
    ///
    /// NOTE(review): `read_to_string` advances the cached handle's cursor to
    /// EOF, so a second fetch that hits the cache presumably reads an empty
    /// string and fails to deserialize — verify; the handle may need to be
    /// rewound before each read.
    pub fn fetch_chunk_by_pos(&mut self, pos: &ChunkPos) -> Result<ChunkData, Box<dyn Error>> {
        let file_name = pos.storage_file_name();
        // Cache hit: reuse the already-open handle; miss: open/create it.
        let file_index = self.cached_file_names.get(file_name.as_str());
        let chunk_file = match file_index {
            Some(index) => self.cached_chunk_files[*index].as_ref().unwrap(),
            None => self.load_chunk_file(pos, file_name.as_str()),
        };
        let file_contents = std::io::read_to_string(chunk_file)?;
        let read_data: ChunkData = serde_json::from_str(file_contents.as_str())?;
        // let read_data: ChunkData = serde_json::from_reader(&mut file_buffer)?;
        // let read_data = ChunkData::new(&ChunkPos { x: 0, z: 0 });
        Ok(read_data)
    }
}

View File

@ -1,3 +0,0 @@
mod chunk_compression;
pub mod disk_storage;
pub mod world;

View File

@ -1,213 +0,0 @@
use core::fmt;
use serde::ser;
use serde::ser::{SerializeSeq, SerializeStruct, Serializer};
use serde::{Deserialize, Serialize};
use std::{
cmp::{max, min},
fmt::Debug,
fs::File,
};
use serde_big_array::BigArray;
// Number of vertically stacked sections in one chunk (see ChunkData.sections).
const SECTIONS_PER_CHUNK: usize = 16;
// Size of one horizontal 16x16 layer inside a section; used as the y-stride
// in ChunkSection::index_of_block.
const SLICE_SIZE: usize = 16 * 16;
// Directory where chunk files are persisted (see ChunkPos::storage_file_name).
const DATABASE_FILE_LOCATION: &str = "./persistence";
/// Identifies a chunk by its coordinates in chunk space (world x/z scaled
/// down by the 16-block chunk width).
#[derive(Debug, Clone, PartialEq, Serialize, Deserialize)]
pub struct ChunkPos {
    pub x: isize,
    pub z: isize,
}
impl From<&BlockPos> for ChunkPos {
    /// Maps a world-space block position to the position of the chunk that
    /// contains it (chunks span 16 blocks along x and z).
    fn from(value: &BlockPos) -> Self {
        // Use Euclidean division so negative coordinates round toward
        // negative infinity: block x = -1 belongs to chunk -1, not chunk 0.
        // The previous `value.x / 16` truncated toward zero, which made
        // blocks -15..=-1 share chunk 0 with blocks 0..=15 and disagreed
        // with `ChunkSection::index_of_block`, which folds coordinates with
        // `rem_euclid(16)` (the remainder paired with `div_euclid`).
        ChunkPos {
            x: value.x.div_euclid(16),
            z: value.z.div_euclid(16),
        }
    }
}
impl ChunkPos {
pub fn storage_file_name(&self) -> String {
format!("{DATABASE_FILE_LOCATION}/{}.{}.chunk", self.x, self.z)
}
}
/// The full contents of one chunk: its position plus a fixed stack of
/// 16 sections (serialized via `serde_arrays` because the array is longer
/// than serde's built-in 32-element limit).
#[derive(Debug, Serialize, Deserialize)]
pub struct ChunkData {
    pub pos: ChunkPos,
    #[serde(with = "serde_arrays")]
    pub sections: [ChunkSection; SECTIONS_PER_CHUNK],
}
// impl Serialize for ChunkData {
// fn serialize<S>(&self, serializer: S) -> Result<S::Ok, S::Error>
// where
// S: serde::Serializer,
// {
// let mut seq = serializer.serialize_seq(Some(self.sections.len()))?;
//
// for section in self.sections {
// seq.serialize_element(&section)?;
// }
// seq.end()
// }
// }
impl ChunkData {
    /// Builds a chunk at `pos` whose sections are all empty.
    pub fn new(pos: &ChunkPos) -> Self {
        ChunkData {
            pos: pos.clone(),
            sections: [ChunkSection::new(); SECTIONS_PER_CHUNK],
        }
    }
    /// Returns the section that holds `block_pos`.
    ///
    /// NOTE(review): indexes with `y % 16`, which folds y and y + 16 onto
    /// the same section; the section index was presumably meant to be
    /// `y / 16`. The write path in `change_block` uses the same expression,
    /// so the two are at least mutually consistent — confirm against the
    /// chunk format before changing either.
    pub fn section_for(&self, block_pos: &BlockPos) -> &ChunkSection {
        &self.sections[block_pos.y % 16]
    }
    /// Serializes this chunk to JSON.
    ///
    /// NOTE(review): the serialized string is never written to
    /// `output_file`, so this function currently has no observable effect —
    /// verify intent before use.
    pub fn write_to_file(&self, output_file: &mut File) {
        let serialized = serde_json::to_string(self).unwrap();
    }
    /// Reads a chunk back from `chunk_file`. Not yet implemented.
    pub fn read_from_file(chunk_file: &File) -> Self {
        unimplemented!()
    }
}
// https://wiki.vg/Chunk_Format
// One 16x16x16 sub-cube of a chunk (layout documented at
// https://wiki.vg/Chunk_Format, linked above).
#[derive(Clone, Copy, Serialize, Deserialize, Debug)]
pub struct ChunkSection {
    /// The number of non-empty blocks in the section. If completely full, the
    /// section contains a 16 x 16 x 16 cube of blocks = 4096 blocks
    /// If the section is empty, this is skipped
    block_count: u16,
    /// The data for all the blocks in the chunk
    /// The representation for this may be different based on the number of
    /// non-empty blocks
    #[serde(with = "serde_arrays")]
    block_states: [BlockID; 16 * 16 * 16],
}
// impl Debug for ChunkSection {
// fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
// write!(f, "ChunkSection {{ blocks: {}, states: ", self.block_count)?;
// if self.block_count > 0 {
// write!(f, "{:?}", self.block_states)?;
// }
// write!(f, " }}")
// }
// }
impl ChunkSection {
    /// Builds an empty section: zero non-empty blocks, every slot `Empty`.
    pub fn new() -> Self {
        Self {
            block_count: 0,
            block_states: [BlockID::Empty; 16 * 16 * 16],
        }
    }

    /// Converts a world-space position into the flat index of the matching
    /// slot inside this 16x16x16 section (y-major, then z, then x).
    pub fn index_of_block(&self, pos: &BlockPos) -> usize {
        // rem_euclid keeps the folded coordinate in 0..16 even for
        // negative world coordinates.
        let x = pos.x.rem_euclid(16) as usize;
        let y = pos.y.rem_euclid(16) as usize;
        let z = pos.z.rem_euclid(16) as usize;

        y * SLICE_SIZE + z * 16 + x
    }

    /// Writes `id` into slot `index`, keeping `block_count` in sync with
    /// the number of non-empty slots.
    pub fn update_block_at_index(&mut self, id: &BlockID, index: usize) {
        let was_empty = matches!(self.block_states[index], BlockID::Empty);
        let now_empty = matches!(id, BlockID::Empty);

        if was_empty && !now_empty {
            // An empty slot becomes occupied.
            self.block_count += 1;
        } else if !was_empty && now_empty {
            // An occupied slot becomes empty.
            self.block_count -= 1;
        }

        self.block_states[index] = id.clone();
    }

    /// Returns a reference to the block stored at `pos`.
    pub fn get_block_at_index(&self, pos: &BlockPos) -> &BlockID {
        &self.block_states[self.index_of_block(pos)]
    }
}
/// `BlockPos` represents the location of a block in world space
/// (y is unsigned: the world has no blocks below height 0).
#[derive(Debug, Clone, PartialEq)]
pub struct BlockPos {
    pub x: isize,
    pub y: usize,
    pub z: isize,
}
impl BlockPos {
pub fn new(x: isize, y: usize, z: isize) -> Self {
BlockPos { x, y, z }
}
}
/// BlockRange represents a range of blocks that have been updated.
/// The two corners may be given in any order; see `within_range`.
#[derive(Debug)]
pub struct BlockRange {
    pub start: BlockPos,
    pub end: BlockPos,
}
impl BlockRange {
    /// Builds a range from two corner positions (cloned; corners may be
    /// given in any order).
    pub fn new(start: &BlockPos, end: &BlockPos) -> Self {
        BlockRange {
            start: start.clone(),
            end: end.clone(),
        }
    }

    /// Reports whether `pos` lies inside the axis-aligned box spanned by
    /// `start` and `end`, with both corners inclusive.
    pub fn within_range(&self, pos: &BlockPos) -> bool {
        // A coordinate is inside when it falls between the two corner
        // values, whichever of them is smaller.
        fn between<T: Ord + Copy>(value: T, a: T, b: T) -> bool {
            (min(a, b)..=max(a, b)).contains(&value)
        }

        between(pos.x, self.start.x, self.end.x)
            && between(pos.y, self.start.y, self.end.y)
            && between(pos.z, self.start.z, self.end.z)
    }
}
/// BlockID represents the type of block stored.
/// `Empty` is the absence of a block; `block_count` in `ChunkSection`
/// tracks how many slots are non-`Empty`.
#[repr(u8)]
#[derive(Debug, Clone, Copy, PartialEq, Serialize, Deserialize)]
pub enum BlockID {
    Empty,
    Generic,
}

View File

@ -1,23 +0,0 @@
use crate::storage::world::{BlockID, BlockPos};
use axum::{routing::get, Router};
pub trait StorageServer {
    /// `change_block` changes the block at the world position given by `world_position` to the
    /// target block id `BlockID`
    fn change_block(&mut self, target_state: BlockID, world_position: &BlockPos);
    /// `change_block_range` sets every block in the box spanned by `start`
    /// and `end` (inclusive) to `target_stage`.
    fn change_block_range(&mut self, target_stage: BlockID, start: &BlockPos, end: &BlockPos);
    /// `read_block_at` returns the id of the block at the location specified
    /// If no block is present, the returned id will be of the empty type
    fn read_block_at(&mut self, pos: &BlockPos) -> BlockID;
}
/// Starts a blocking HTTP server on 0.0.0.0:5000 that answers "Hello World"
/// at the root path. `#[tokio::main]` makes this callable as a plain
/// synchronous function; it blocks until the server exits.
#[tokio::main]
pub async fn main() {
    let app = Router::new().route("/", get(|| async { "Hello World" }));
    axum::Server::bind(&"0.0.0.0:5000".parse().unwrap())
        .serve(app.into_make_service())
        .await
        .unwrap();
}

View File

@ -1,108 +0,0 @@
use crate::simple_server::server::SimpleServer;
use crate::storage::world::{BlockPos, BlockRange};
#[cfg(test)]
mod tests {
    use crate::{storage::world::BlockID, storage_server::StorageServer};
    use super::*;
    // Verifies `BlockRange::within_range` on a flat (z = 0) box: all four
    // corners and an interior point are inside; a point past an edge is not.
    #[test]
    fn within_two_dimensions() {
        // Get two points on the same z axis
        let first = BlockPos::new(0, 0, 0);
        let second = BlockPos::new(4, 4, 0);
        let range = BlockRange::new(&first, &second);
        let test1 = BlockPos::new(1, 1, 0);
        let test2 = BlockPos::new(0, 0, 0);
        let test3 = BlockPos::new(0, 4, 0);
        let test4 = BlockPos::new(4, 4, 0);
        let test5 = BlockPos::new(4, 0, 0);
        assert!(range.within_range(&test1));
        assert!(range.within_range(&test2));
        assert!(range.within_range(&test3));
        assert!(range.within_range(&test4));
        assert!(range.within_range(&test5));
        let test6 = BlockPos::new(-1, 0, 0);
        assert!(!range.within_range(&test6));
    }
    // Write one block, then read it back along with misses inside and
    // outside its chunk.
    // NOTE(review): `SimpleServer::num_chunks` is `unimplemented!()` in this
    // snapshot, so this test presumably panics — verify.
    #[test]
    fn test_simple_insert() {
        let mut server = SimpleServer::new();
        server.change_block(BlockID::Generic, &BlockPos::new(0, 0, 0));
        // Make sure the server only creates one chunk
        assert_eq!(server.num_chunks(), 1);
        // Retrieve one value
        assert_eq!(
            server.read_block_at(&BlockPos::new(0, 0, 0)),
            BlockID::Generic
        );
        // Retrieve an empty value in the current chunk
        assert_eq!(
            server.read_block_at(&BlockPos::new(1, 1, 1)),
            BlockID::Empty
        );
        // Retrieve a value in an empty chunk
        assert_eq!(
            server.read_block_at(&BlockPos::new(32, 32, 32)),
            BlockID::Empty
        );
        // Make sure a chunk was not created on that read
        assert_eq!(server.num_chunks(), 1);
    }
    // Write then erase a single block; the slot should read back as Empty.
    // NOTE(review): also depends on the unimplemented `num_chunks`.
    #[test]
    fn test_remove_one_block() {
        let mut server = SimpleServer::new();
        let pos = BlockPos::new(0, 0, 0);
        server.change_block(BlockID::Generic, &pos);
        assert_eq!(server.num_chunks(), 1);
        assert_eq!(server.read_block_at(&pos), BlockID::Generic);
        server.change_block(BlockID::Empty, &pos);
        assert_eq!(server.read_block_at(&pos), BlockID::Empty);
    }
    // Write a small cluster of blocks (including negative coordinates) and
    // read every one of them back.
    // NOTE(review): also depends on the unimplemented `num_chunks`.
    #[test]
    fn test_insert_some_blocks() {
        let mut server = SimpleServer::new();
        let blocks = [
            BlockPos::new(0, 2, 0),
            BlockPos::new(0, 2, 1),
            BlockPos::new(0, 2, -1),
            BlockPos::new(1, 2, 0),
            BlockPos::new(-1, 2, 0),
            BlockPos::new(0, 3, 0),
            BlockPos::new(0, 0, 0),
        ];
        for pos in blocks.iter() {
            server.change_block(BlockID::Generic, pos);
        }
        assert_eq!(server.num_chunks(), 1);
        for pos in blocks.iter() {
            let read = server.read_block_at(pos);
            println!("Pos: {:?}, {:?}", pos, read);
            assert_eq!(read, BlockID::Generic);
        }
    }
}

View File

@ -1,2 +0,0 @@
mod insert_one;
mod performance_testing;

View File

@ -1,154 +0,0 @@
extern crate test;
#[cfg(test)]
mod tests {
    use super::*;
    use crate::{
        simple_server::server::SimpleServer,
        storage::world::{BlockID, BlockPos},
        storage_server::StorageServer,
    };
    use rand::prelude::*;
    use test::Bencher;
    // Insert blocks at consecutive x coordinates; each iteration touches the
    // next position along one axis.
    #[bench]
    fn bench_add_sequential_elements(b: &mut Bencher) {
        let mut server = SimpleServer::new();
        let mut x = 0;
        b.iter(|| {
            server.change_block(BlockID::Generic, &BlockPos::new(x, 0, 0));
            x += 1;
        });
    }
    // Insert random blocks confined to a small (+-128) region, so writes
    // cluster into relatively few chunks.
    #[bench]
    fn bench_add_clustered_points(b: &mut Bencher) {
        let mut server = SimpleServer::new();
        let mut rng = rand::thread_rng();
        static MAX_RANGE: isize = 128;
        b.iter(|| {
            let x: isize = rng.gen_range(-MAX_RANGE..MAX_RANGE);
            let y: usize = rng.gen::<u8>() as usize;
            let z: isize = rng.gen_range(-MAX_RANGE..MAX_RANGE);
            server.change_block(BlockID::Generic, &BlockPos::new(x, y, z));
        });
    }
    // Same as above but over a +-65536 region, spreading writes across many
    // more chunks.
    #[bench]
    fn bench_add_spread_out_points(b: &mut Bencher) {
        let mut server = SimpleServer::new();
        let mut rng = rand::thread_rng();
        static MAX_RANGE: isize = 65536;
        b.iter(|| {
            let x: isize = rng.gen_range(-MAX_RANGE..MAX_RANGE);
            let y: usize = rng.gen::<u8>() as usize;
            let z: isize = rng.gen_range(-MAX_RANGE..MAX_RANGE);
            server.change_block(BlockID::Generic, &BlockPos::new(x, y, z));
        });
    }
    // Pre-insert 1000 clustered blocks outside the timed loop, then time
    // reading all of them back.
    #[bench]
    fn bench_insert_and_read_clustered(b: &mut Bencher) {
        let mut server = SimpleServer::new();
        let mut rng = rand::thread_rng();
        static NUM_BLOCKS: usize = 1_000;
        static MAX_RANGE: isize = 128;
        let mut positions = Vec::with_capacity(NUM_BLOCKS);
        for _ in 0..NUM_BLOCKS {
            let x: isize = rng.gen_range(-MAX_RANGE..MAX_RANGE);
            let y: usize = rng.gen::<u8>() as usize;
            let z: isize = rng.gen_range(-MAX_RANGE..MAX_RANGE);
            let pos = BlockPos::new(x, y, z);
            server.change_block(BlockID::Generic, &BlockPos::new(x, y, z));
            positions.push(pos);
        }
        b.iter(|| {
            for i in 0..NUM_BLOCKS {
                assert_eq!(server.read_block_at(&positions[i]), BlockID::Generic);
            }
        });
    }
    // Time a mix of guaranteed hits (known positions) and probable misses
    // (random positions drawn from a wider region).
    #[bench]
    fn bench_insert_and_read_cache(b: &mut Bencher) {
        let mut server = SimpleServer::new();
        let mut rng = rand::thread_rng();
        static NUM_BLOCKS: usize = 1_000;
        static MAX_RANGE: isize = 128;
        static EXPANDED_RANGE: isize = 2048;
        let mut positions = Vec::with_capacity(NUM_BLOCKS);
        for _ in 0..NUM_BLOCKS {
            let x: isize = rng.gen_range(-MAX_RANGE..MAX_RANGE);
            let y: usize = rng.gen::<u8>() as usize;
            let z: isize = rng.gen_range(-MAX_RANGE..MAX_RANGE);
            let pos = BlockPos::new(x, y, z);
            server.change_block(BlockID::Generic, &BlockPos::new(x, y, z));
            positions.push(pos);
        }
        b.iter(|| {
            // Read blocks that are already in the server
            for i in 0..NUM_BLOCKS {
                assert_eq!(server.read_block_at(&positions[i]), BlockID::Generic);
            }
            // Read blocks that might not be in the server, triggering a miss
            for _ in 0..NUM_BLOCKS {
                let x: isize = rng.gen_range(-EXPANDED_RANGE..EXPANDED_RANGE);
                let y: usize = rng.gen::<u8>() as usize;
                let z: isize = rng.gen_range(-EXPANDED_RANGE..EXPANDED_RANGE);
                server.read_block_at(&BlockPos::new(x, y, z));
            }
        });
    }
    // Time reads that are mostly misses: the server holds clustered data but
    // lookups are drawn from a much wider region.
    #[bench]
    fn bench_clustered_many_misses(b: &mut Bencher) {
        let mut server = SimpleServer::new();
        let mut rng = rand::thread_rng();
        static NUM_BLOCKS: usize = 1_000;
        static MAX_RANGE: isize = 128;
        static EXPANDED_RANGE: isize = 2048;
        for _ in 0..NUM_BLOCKS {
            let x: isize = rng.gen_range(-MAX_RANGE..MAX_RANGE);
            let y: usize = rng.gen::<u8>() as usize;
            let z: isize = rng.gen_range(-MAX_RANGE..MAX_RANGE);
            server.change_block(BlockID::Generic, &BlockPos::new(x, y, z));
        }
        b.iter(|| {
            // Read blocks that might not be in the server, triggering a miss
            for _ in 0..NUM_BLOCKS {
                let x: isize = rng.gen_range(-EXPANDED_RANGE..EXPANDED_RANGE);
                let y: usize = rng.gen::<u8>() as usize;
                let z: isize = rng.gen_range(-EXPANDED_RANGE..EXPANDED_RANGE);
                server.read_block_at(&BlockPos::new(x, y, z));
            }
        });
    }
}