From 31f3c8e21db998d79507001b27a6662446a2dd48 Mon Sep 17 00:00:00 2001 From: Nicholas Novak <34256932+NickyBoy89@users.noreply.github.com> Date: Sun, 10 Dec 2023 21:28:48 -0800 Subject: [PATCH] feat: Added parallel initialization to inmemory and hash servers --- server/hashserver.go | 27 +++---------------- server/inmemory_server.go | 34 ++---------------------- storage/file_operations.go | 53 ++++++++++++++++++++++++++++++++++++++ 3 files changed, 58 insertions(+), 56 deletions(-) diff --git a/server/hashserver.go b/server/hashserver.go index dff338f..1073658 100644 --- a/server/hashserver.go +++ b/server/hashserver.go @@ -1,12 +1,7 @@ package server import ( - "encoding/json" - "os" - "path/filepath" - - log "github.com/sirupsen/logrus" - + "git.nicholasnovak.io/nnovak/spatial-db/storage" "git.nicholasnovak.io/nnovak/spatial-db/world" ) @@ -17,26 +12,12 @@ type HashServer struct { func (hs *HashServer) SetStorageRoot(path string) { hs.blocks = make(map[world.BlockPos]world.BlockID) - chunkFiles, err := os.ReadDir(path) + chunks, err := storage.ReadParallelFromDirectory(path) if err != nil { panic(err) } - for chunkIndex, chunkFile := range chunkFiles { - var data world.ChunkData - - log.Infof("Reading in chunk %d of %d", chunkIndex, len(chunkFiles)) - - f, err := os.Open(filepath.Join(path, chunkFile.Name())) - if err != nil { - panic(err) - } - - // Read each file from disk - if err := json.NewDecoder(f).Decode(&data); err != nil { - panic(err) - } - + for _, data := range chunks { // Load in each data point from disk for _, section := range data.Sections { for blockIndex, blockState := range section.BlockStates { @@ -44,8 +25,6 @@ func (hs *HashServer) SetStorageRoot(path string) { hs.blocks[pos] = blockState } } - - f.Close() } } diff --git a/server/inmemory_server.go b/server/inmemory_server.go index 7997aa2..5cb40f8 100644 --- a/server/inmemory_server.go +++ b/server/inmemory_server.go @@ -5,7 +5,6 @@ import ( "io/fs" "os" "path/filepath" - "strings" 
"git.nicholasnovak.io/nnovak/spatial-db/storage" "git.nicholasnovak.io/nnovak/spatial-db/world" @@ -19,41 +18,12 @@ type InMemoryServer struct { func (s *InMemoryServer) SetStorageRoot(path string) { s.StorageDir = path - chunkFiles, err := os.ReadDir(s.StorageDir) + chunks, err := storage.ReadParallelFromDirectory(s.StorageDir) if err != nil { panic(err) } - s.Chunks = make(map[world.ChunkPos]world.ChunkData) - - validChunkFiles := []fs.DirEntry{} - for _, chunkFile := range chunkFiles { - if chunkFile.IsDir() || !strings.HasSuffix(chunkFile.Name(), ".chunk") { - continue - } - validChunkFiles = append(validChunkFiles, chunkFile) - } - - chunks := make([]world.ChunkData, len(validChunkFiles)) - - for chunkIndex, chunkFile := range validChunkFiles { - go func(index int, cf fs.DirEntry) { - file, err := os.Open(filepath.Join(s.StorageDir, cf.Name())) - if err != nil { - panic(err) - } - - chunkData, err := storage.ReadChunkFromFile(file) - if err != nil { - panic(err) - } - - file.Close() - - chunks[index] = chunkData - }(chunkIndex, chunkFile) - } - + s.Chunks = make(map[world.ChunkPos]world.ChunkData, len(chunks)) for _, chunkData := range chunks { s.Chunks[chunkData.Pos] = chunkData } diff --git a/storage/file_operations.go b/storage/file_operations.go index c7a191c..6900e7b 100644 --- a/storage/file_operations.go +++ b/storage/file_operations.go @@ -2,7 +2,11 @@ package storage import ( "encoding/json" + "io/fs" "os" + "path/filepath" + "strings" + "sync" "git.nicholasnovak.io/nnovak/spatial-db/world" ) @@ -16,3 +20,52 @@ func ReadChunkFromFile(chunkFile *os.File) (world.ChunkData, error) { return chunkData, nil } + +func ReadParallelFromDirectory(dirName string) ([]world.ChunkData, error) { + chunkFiles, err := os.ReadDir(dirName) + if err != nil { + return nil, err + } + + // Filter invalid chunks + + validChunkFiles := []fs.DirEntry{} + for _, chunkFile := range chunkFiles { + if chunkFile.IsDir() || !strings.HasSuffix(chunkFile.Name(), ".chunk") { + continue + } 
+ validChunkFiles = append(validChunkFiles, chunkFile) + } + + chunks := make([]world.ChunkData, len(validChunkFiles)) + + var wg sync.WaitGroup + wg.Add(len(validChunkFiles)) + + for fileIndex, chunkFile := range validChunkFiles { + // Avoid implicit copies + chunkFile := chunkFile + fileIndex := fileIndex + + go func() { + defer wg.Done() + + file, err := os.Open(filepath.Join(dirName, chunkFile.Name())) + if err != nil { + panic(err) + } + defer file.Close() + + chunkData, err := ReadChunkFromFile(file) + if err != nil { + panic(err) + } + + chunks[fileIndex] = chunkData + }() + } + + wg.Wait() + + return chunks, nil +}