feat: Added parallel initialization to inmemory and hash servers

Nicholas Novak 2023-12-10 21:28:48 -08:00
parent 438c015767
commit 31f3c8e21d
3 changed files with 58 additions and 56 deletions

@@ -1,12 +1,7 @@
package server
import (
"encoding/json"
"os"
"path/filepath"
log "github.com/sirupsen/logrus"
"git.nicholasnovak.io/nnovak/spatial-db/storage"
"git.nicholasnovak.io/nnovak/spatial-db/world"
)
@@ -17,26 +12,12 @@ type HashServer struct {
func (hs *HashServer) SetStorageRoot(path string) {
hs.blocks = make(map[world.BlockPos]world.BlockID)
chunkFiles, err := os.ReadDir(path)
chunks, err := storage.ReadParallelFromDirectory(path)
if err != nil {
panic(err)
}
for chunkIndex, chunkFile := range chunkFiles {
var data world.ChunkData
log.Infof("Reading in chunk %d of %d", chunkIndex, len(chunkFiles))
f, err := os.Open(filepath.Join(path, chunkFile.Name()))
if err != nil {
panic(err)
}
// Read each file from disk
if err := json.NewDecoder(f).Decode(&data); err != nil {
panic(err)
}
for _, data := range chunks {
// Load in each data point from disk
for _, section := range data.Sections {
for blockIndex, blockState := range section.BlockStates {
@@ -44,8 +25,6 @@ func (hs *HashServer) SetStorageRoot(path string) {
hs.blocks[pos] = blockState
}
}
f.Close()
}
}
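
Reconstructed for readability, HashServer.SetStorageRoot after this change looks roughly like the sketch below. The diff above interleaves removed and added lines, and the hunks do not show how a world.BlockPos is derived from blockIndex, so indexToBlockPos here is a hypothetical stand-in for unchanged code that the diff does not display.

func (hs *HashServer) SetStorageRoot(path string) {
    hs.blocks = make(map[world.BlockPos]world.BlockID)

    // Read and decode every chunk file in the directory concurrently.
    chunks, err := storage.ReadParallelFromDirectory(path)
    if err != nil {
        panic(err)
    }

    // Load in each data point from disk
    for _, data := range chunks {
        for _, section := range data.Sections {
            for blockIndex, blockState := range section.BlockStates {
                // Hypothetical stand-in: the real position calculation lives
                // in unchanged lines that this hunk does not show.
                pos := indexToBlockPos(data, section, blockIndex)
                hs.blocks[pos] = blockState
            }
        }
    }
}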

@@ -5,7 +5,6 @@ import (
"io/fs"
"os"
"path/filepath"
"strings"
"git.nicholasnovak.io/nnovak/spatial-db/storage"
"git.nicholasnovak.io/nnovak/spatial-db/world"
@@ -19,41 +18,12 @@ type InMemoryServer struct {
func (s *InMemoryServer) SetStorageRoot(path string) {
s.StorageDir = path
chunkFiles, err := os.ReadDir(s.StorageDir)
chunks, err := storage.ReadParallelFromDirectory(s.StorageDir)
if err != nil {
panic(err)
}
s.Chunks = make(map[world.ChunkPos]world.ChunkData)
validChunkFiles := []fs.DirEntry{}
for _, chunkFile := range chunkFiles {
if chunkFile.IsDir() || !strings.HasSuffix(chunkFile.Name(), ".chunk") {
continue
}
validChunkFiles = append(validChunkFiles, chunkFile)
}
chunks := make([]world.ChunkData, len(validChunkFiles))
for chunkIndex, chunkFile := range validChunkFiles {
go func(index int, cf fs.DirEntry) {
file, err := os.Open(filepath.Join(s.StorageDir, cf.Name()))
if err != nil {
panic(err)
}
chunkData, err := storage.ReadChunkFromFile(file)
if err != nil {
panic(err)
}
file.Close()
chunks[index] = chunkData
}(chunkIndex, chunkFile)
}
s.Chunks = make(map[world.ChunkPos]world.ChunkData, len(chunks))
for _, chunkData := range chunks {
s.Chunks[chunkData.Pos] = chunkData
}

@@ -2,7 +2,11 @@ package storage
import (
"encoding/json"
"io/fs"
"os"
"path/filepath"
"strings"
"sync"
"git.nicholasnovak.io/nnovak/spatial-db/world"
)
@@ -16,3 +20,52 @@ func ReadChunkFromFile(chunkFile *os.File) (world.ChunkData, error) {
return chunkData, nil
}
func ReadParallelFromDirectory(dirName string) ([]world.ChunkData, error) {
chunkFiles, err := os.ReadDir(dirName)
if err != nil {
panic(err)
}
// Filter invalid chunks
validChunkFiles := []fs.DirEntry{}
for _, chunkFile := range chunkFiles {
if chunkFile.IsDir() || !strings.HasSuffix(chunkFile.Name(), ".chunk") {
continue
}
validChunkFiles = append(validChunkFiles, chunkFile)
}
chunks := make([]world.ChunkData, len(validChunkFiles))
var wg sync.WaitGroup
wg.Add(len(validChunkFiles))
for fileIndex, chunkFile := range validChunkFiles {
// Avoid implicit copies
chunkFile := chunkFile
fileIndex := fileIndex
go func() {
defer wg.Done()
file, err := os.Open(filepath.Join(dirName, chunkFile.Name()))
if err != nil {
panic(err)
}
chunkData, err := ReadChunkFromFile(file)
if err != nil {
panic(err)
}
file.Close()
chunks[fileIndex] = chunkData
}()
}
wg.Wait()
return chunks, nil
}
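
Two details make ReadParallelFromDirectory safe without a mutex: each goroutine writes only to its own index of the pre-allocated chunks slice, and wg.Wait() blocks until every file has been decoded before the slice is returned. One possible refinement, sketched below on the assumption that callers would rather handle a bad chunk file than crash, is to report per-file failures through the function's existing error return instead of panicking inside the goroutines; the errs slice and the readParallelCollectingErrors name are illustrative and not part of this commit.

func readParallelCollectingErrors(dirName string) ([]world.ChunkData, error) {
    chunkFiles, err := os.ReadDir(dirName)
    if err != nil {
        return nil, err
    }

    // Filter invalid chunks, as above.
    validChunkFiles := []fs.DirEntry{}
    for _, chunkFile := range chunkFiles {
        if chunkFile.IsDir() || !strings.HasSuffix(chunkFile.Name(), ".chunk") {
            continue
        }
        validChunkFiles = append(validChunkFiles, chunkFile)
    }

    chunks := make([]world.ChunkData, len(validChunkFiles))
    errs := make([]error, len(validChunkFiles))

    var wg sync.WaitGroup
    for fileIndex, chunkFile := range validChunkFiles {
        wg.Add(1)
        go func(fileIndex int, chunkFile fs.DirEntry) {
            defer wg.Done()

            file, err := os.Open(filepath.Join(dirName, chunkFile.Name()))
            if err != nil {
                errs[fileIndex] = err
                return
            }
            defer file.Close()

            chunks[fileIndex], errs[fileIndex] = ReadChunkFromFile(file)
        }(fileIndex, chunkFile)
    }
    wg.Wait()

    // Surface the first failure, if any.
    for _, err := range errs {
        if err != nil {
            return nil, err
        }
    }
    return chunks, nil
}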