Compare commits

...

4 Commits

Author SHA1 Message Date
Nicholas Novak
5ad8bd2407 feat: Moved over all of the servers to use unity files 2023-12-11 16:43:10 -08:00
Nicholas Novak
07b676c571 fix: Fixed reversed offsets for unity files 2023-12-11 16:38:46 -08:00
Nicholas Novak
81915ff4f3 fix: Fixed metadata not being saved 2023-12-11 09:57:11 -08:00
Nicholas Novak
e1d4537af7 change: Updated save loading to work with unity files 2023-12-11 09:56:50 -08:00
7 changed files with 134 additions and 36 deletions

View File

@@ -5,15 +5,12 @@ SAVES += witchcraft-save
all: $(SAVES) all: $(SAVES)
imperial-save: compile imperial-save: compile
mkdir imperial-save
./spatial-db load worldsave "saves/Imperialcity v14.1/region" --output "imperial-save" ./spatial-db load worldsave "saves/Imperialcity v14.1/region" --output "imperial-save"
skygrid-save: compile skygrid-save: compile
mkdir skygrid-save
./spatial-db load worldsave "saves/SkyGrid/region" --output "skygrid-save" ./spatial-db load worldsave "saves/SkyGrid/region" --output "skygrid-save"
witchcraft-save: compile witchcraft-save: compile
mkdir witchcraft-save
./spatial-db load worldsave "saves/Witchcraft/region" --output "witchcraft-save" ./spatial-db load worldsave "saves/Witchcraft/region" --output "witchcraft-save"
.PHONY: compile .PHONY: compile

View File

@@ -1,11 +1,11 @@
package loading package loading
import ( import (
"encoding/json"
"os" "os"
"path/filepath" "path/filepath"
"strings" "strings"
"git.nicholasnovak.io/nnovak/spatial-db/storage"
log "github.com/sirupsen/logrus" log "github.com/sirupsen/logrus"
"github.com/spf13/cobra" "github.com/spf13/cobra"
@@ -32,6 +32,13 @@ var LoadSaveDirCommand = &cobra.Command{
log.Infof("Loading save directory of %s", args[0]) log.Infof("Loading save directory of %s", args[0])
u, err := storage.CreateUnityFile(saveOutputDir)
if err != nil {
return err
}
defer u.Close()
defer u.WriteMetadataFile(saveOutputDir + ".metadata")
for regionIndex, regionFile := range regionFiles { for regionIndex, regionFile := range regionFiles {
if regionFile.IsDir() { if regionFile.IsDir() {
continue continue
@@ -58,22 +65,9 @@ var LoadSaveDirCommand = &cobra.Command{
// Save each chunk to a separate file // Save each chunk to a separate file
for _, chunk := range chunks { for _, chunk := range chunks {
chunkFilename := chunk.Pos.ToFileName() if err := u.WriteChunk(chunk); err != nil {
outfile, err := os.OpenFile(
filepath.Join(saveOutputDir, chunkFilename),
os.O_WRONLY|os.O_CREATE|os.O_APPEND,
0664,
)
if err != nil {
return err return err
} }
if err := json.NewEncoder(outfile).Encode(chunk); err != nil {
return err
}
outfile.Close()
} }
} }

View File

@@ -13,7 +13,13 @@ type HashServer struct {
func (hs *HashServer) SetStorageRoot(path string) { func (hs *HashServer) SetStorageRoot(path string) {
hs.blocks = make(map[world.BlockPos]world.BlockID) hs.blocks = make(map[world.BlockPos]world.BlockID)
chunks, err := storage.ReadParallelFromDirectory(path) u, err := storage.OpenUnityFile(path, path+".metadata")
if err != nil {
panic(err)
}
defer u.Close()
chunks, err := u.ReadAllChunks()
if err != nil { if err != nil {
panic(err) panic(err)
} }

View File

@@ -18,7 +18,13 @@ type InMemoryServer struct {
func (s *InMemoryServer) SetStorageRoot(path string) { func (s *InMemoryServer) SetStorageRoot(path string) {
s.StorageDir = path s.StorageDir = path
chunks, err := storage.ReadParallelFromDirectory(s.StorageDir) u, err := storage.OpenUnityFile(s.StorageDir, s.StorageDir+".metadata")
if err != nil {
panic(err)
}
defer u.Close()
chunks, err := u.ReadAllChunks()
if err != nil { if err != nil {
panic(err) panic(err)
} }

View File

@@ -14,13 +14,18 @@ import (
const fileCacheSize = 8 const fileCacheSize = 8
type SimpleServer struct { type SimpleServer struct {
StorageDir string StorageDir string
cache storage.FileCache storageBackend storage.UnityFile
} }
func (s *SimpleServer) SetStorageRoot(path string) { func (s *SimpleServer) SetStorageRoot(path string) {
s.StorageDir = path s.StorageDir = path
s.cache = storage.NewFileCache(256)
var err error
s.storageBackend, err = storage.OpenUnityFile(path, path+".metadata")
if err != nil {
panic(err)
}
} }
// Filesystem operations // Filesystem operations
@@ -58,11 +63,7 @@ func (s *SimpleServer) FetchOrCreateChunk(pos world.ChunkPos) (world.ChunkData,
// `FetchChunk' fetches the chunk's data, given the chunk's position // `FetchChunk' fetches the chunk's data, given the chunk's position
func (s *SimpleServer) FetchChunk(pos world.ChunkPos) (world.ChunkData, error) { func (s *SimpleServer) FetchChunk(pos world.ChunkPos) (world.ChunkData, error) {
chunkFileName := filepath.Join(s.StorageDir, pos.ToFileName()) chunkData, err := s.storageBackend.ReadChunk(pos)
var chunkData world.ChunkData
chunkFile, err := s.cache.FetchFile(chunkFileName)
if err != nil { if err != nil {
if errors.Is(err, fs.ErrNotExist) { if errors.Is(err, fs.ErrNotExist) {
return chunkData, storage.ChunkNotFoundError return chunkData, storage.ChunkNotFoundError
@@ -71,7 +72,7 @@ func (s *SimpleServer) FetchChunk(pos world.ChunkPos) (world.ChunkData, error) {
} }
} }
return storage.ReadChunkFromFile(chunkFile) return chunkData, nil
} }
// Voxel server implementation // Voxel server implementation

View File

@@ -21,8 +21,8 @@ type UnityFile struct {
} }
type fileMetadata struct { type fileMetadata struct {
startOffset int StartOffset int `json:"start_offset"`
fileSize int FileSize int `json:"file_size"`
} }
func CreateUnityFile(fileName string) (UnityFile, error) { func CreateUnityFile(fileName string) (UnityFile, error) {
@@ -39,6 +39,23 @@ func CreateUnityFile(fileName string) (UnityFile, error) {
return u, nil return u, nil
} }
func OpenUnityFile(fileName, metadataName string) (UnityFile, error) {
var u UnityFile
// Read the file
f, err := os.Open(fileName)
if err != nil {
return u, err
}
u.fd = f
if err := u.ReadMetadataFile(metadataName); err != nil {
return u, err
}
return u, nil
}
func (u UnityFile) Size() int { func (u UnityFile) Size() int {
return u.fileSize return u.fileSize
} }
@@ -54,7 +71,7 @@ func (u *UnityFile) WriteChunk(data world.ChunkData) error {
encodedSize := encoded.Len() encodedSize := encoded.Len()
// Go to the end of the file // Go to the end of the file
u.fd.Seek(0, u.fileSize) u.fd.Seek(int64(u.fileSize), io.SeekStart)
// Write the encoded contents to the file // Write the encoded contents to the file
if _, err := u.fd.Write(encoded.Bytes()); err != nil { if _, err := u.fd.Write(encoded.Bytes()); err != nil {
return err return err
@@ -62,8 +79,8 @@ func (u *UnityFile) WriteChunk(data world.ChunkData) error {
// Update the metadata with the new file // Update the metadata with the new file
u.metadata[data.Pos] = fileMetadata{ u.metadata[data.Pos] = fileMetadata{
startOffset: u.fileSize, StartOffset: u.fileSize,
fileSize: encodedSize, FileSize: encodedSize,
} }
u.fileSize += encodedSize u.fileSize += encodedSize
@@ -100,9 +117,9 @@ func (u *UnityFile) ReadMetadataFile(fileName string) error {
func (u UnityFile) ReadChunk(pos world.ChunkPos) (world.ChunkData, error) { func (u UnityFile) ReadChunk(pos world.ChunkPos) (world.ChunkData, error) {
m := u.metadata[pos] m := u.metadata[pos]
u.fd.Seek(0, m.startOffset) u.fd.Seek(int64(m.StartOffset), io.SeekStart)
fileReader := io.LimitReader(u.fd, int64(m.fileSize)) fileReader := io.LimitReader(u.fd, int64(m.FileSize))
var data world.ChunkData var data world.ChunkData
if err := json.NewDecoder(fileReader).Decode(&data); err != nil { if err := json.NewDecoder(fileReader).Decode(&data); err != nil {
@@ -111,3 +128,21 @@ func (u UnityFile) ReadChunk(pos world.ChunkPos) (world.ChunkData, error) {
return data, nil return data, nil
} }
func (u UnityFile) ReadAllChunks() ([]world.ChunkData, error) {
chunks := []world.ChunkData{}
for pos := range u.metadata {
chunk, err := u.ReadChunk(pos)
if err != nil {
return nil, err
}
chunks = append(chunks, chunk)
}
return chunks, nil
}
func (u *UnityFile) Close() error {
return u.fd.Close()
}

View File

@@ -1,6 +1,7 @@
package storage package storage
import ( import (
"fmt"
"os" "os"
"path" "path"
"reflect" "reflect"
@@ -69,6 +70,7 @@ func TestWriteMultipleFiles(t *testing.T) {
if err != nil { if err != nil {
t.Fatalf("Error creating temporary directory: %v", err) t.Fatalf("Error creating temporary directory: %v", err)
} }
fmt.Println(tempDir)
defer os.RemoveAll(tempDir) defer os.RemoveAll(tempDir)
u, err := CreateUnityFile(path.Join(tempDir, "test-unity")) u, err := CreateUnityFile(path.Join(tempDir, "test-unity"))
@@ -119,3 +121,60 @@
} }
} }
} }
func TestReadAllChunks(t *testing.T) {
tempDir, err := os.MkdirTemp("", "unity")
if err != nil {
t.Fatalf("Error creating temporary directory: %v", err)
}
fmt.Println(tempDir)
defer os.RemoveAll(tempDir)
u, err := CreateUnityFile(path.Join(tempDir, "test-unity"))
if err != nil {
t.Fatalf("Error creating unity file: %v", err)
}
var (
chunk1 world.ChunkData
chunk2 world.ChunkData
chunk3 world.ChunkData
)
chunk1.Pos = world.ChunkPos{
X: 0,
Z: 0,
}
chunk1.Sections[0].BlockStates[0] = 2
chunk2.Sections[0].BlockStates[0] = 3
chunk2.Pos = world.ChunkPos{
X: 1,
Z: 0,
}
chunk3.Sections[0].BlockStates[0] = 4
chunk3.Pos = world.ChunkPos{
X: 2,
Z: 0,
}
chunks := []world.ChunkData{chunk1, chunk2, chunk3}
// Write all chunks
for _, data := range chunks {
if err := u.WriteChunk(data); err != nil {
t.Fatalf("Error writing chunk: %v", err)
}
}
t.Log(chunks)
readChunks, err := u.ReadAllChunks()
if err != nil {
t.Fatalf("Error reading chunks: %v", err)
}
for index, chunk := range readChunks {
if !reflect.DeepEqual(chunk, chunks[index]) {
t.Fatalf("Chunks were not equal")
}
}
}