Compare commits
No commits in common. "5ad8bd2407c384769a93773dd59a4fee10d9564b" and "e0f1cd2436032e48df36c826317fbbad56856a88" have entirely different histories.
5ad8bd2407
...
e0f1cd2436
Makefile (3 changed lines)
@@ -5,12 +5,15 @@ SAVES += witchcraft-save
all: $(SAVES)

imperial-save: compile
	mkdir imperial-save
	./spatial-db load worldsave "saves/Imperialcity v14.1/region" --output "imperial-save"

skygrid-save: compile
	mkdir skygrid-save
	./spatial-db load worldsave "saves/SkyGrid/region" --output "skygrid-save"

witchcraft-save: compile
	mkdir witchcraft-save
	./spatial-db load worldsave "saves/Witchcraft/region" --output "witchcraft-save"

.PHONY: compile
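Each save target creates its output directory and then runs the loader CLI against a world-save region directory. For reference, a minimal Go sketch that drives the same skygrid-save step via os/exec; it assumes the spatial-db binary has already been built by the compile target:

// Sketch: drive the same load step the Makefile runs, from Go.
package main

import (
	"log"
	"os"
	"os/exec"
)

func main() {
	// Mirror the skygrid-save recipe: create the output directory,
	// then run the loader against the region directory.
	if err := os.MkdirAll("skygrid-save", 0o755); err != nil {
		log.Fatal(err)
	}

	cmd := exec.Command("./spatial-db",
		"load", "worldsave", "saves/SkyGrid/region", "--output", "skygrid-save")
	cmd.Stdout = os.Stdout
	cmd.Stderr = os.Stderr
	if err := cmd.Run(); err != nil {
		log.Fatal(err)
	}
}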
@@ -1,11 +1,11 @@
package loading

import (
	"encoding/json"
	"os"
	"path/filepath"
	"strings"

	"git.nicholasnovak.io/nnovak/spatial-db/storage"
	log "github.com/sirupsen/logrus"

	"github.com/spf13/cobra"
@@ -32,13 +32,6 @@ var LoadSaveDirCommand = &cobra.Command{

		log.Infof("Loading save directory of %s", args[0])

		u, err := storage.CreateUnityFile(saveOutputDir)
		if err != nil {
			return err
		}
		defer u.Close()
		defer u.WriteMetadataFile(saveOutputDir + ".metadata")

		for regionIndex, regionFile := range regionFiles {
			if regionFile.IsDir() {
				continue
@@ -65,9 +58,22 @@ var LoadSaveDirCommand = &cobra.Command{

			// Save each chunk to a separate file
			for _, chunk := range chunks {
				if err := u.WriteChunk(chunk); err != nil {
				chunkFilename := chunk.Pos.ToFileName()

				outfile, err := os.OpenFile(
					filepath.Join(saveOutputDir, chunkFilename),
					os.O_WRONLY|os.O_CREATE|os.O_APPEND,
					0664,
				)
				if err != nil {
					return err
				}

				if err := json.NewEncoder(outfile).Encode(chunk); err != nil {
					return err
				}

				outfile.Close()
			}
		}
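On the new side of this hunk, each chunk is written to its own JSON file named after its position rather than appended to a unity file. A self-contained sketch of that one-file-per-chunk pattern; the ChunkPos/ChunkData types and the filename scheme below are simplified stand-ins, not the repository's world package:

// Sketch of the one-file-per-chunk layout used by the new loader.
package main

import (
	"encoding/json"
	"fmt"
	"log"
	"os"
	"path/filepath"
)

// Simplified stand-ins for the repository's world.ChunkPos / world.ChunkData.
type ChunkPos struct{ X, Z int }

func (p ChunkPos) ToFileName() string {
	// Hypothetical naming scheme; the real one lives in the world package.
	return fmt.Sprintf("chunk_%d_%d.json", p.X, p.Z)
}

type ChunkData struct {
	Pos    ChunkPos `json:"pos"`
	Blocks []int    `json:"blocks"`
}

func writeChunk(outputDir string, chunk ChunkData) error {
	outfile, err := os.OpenFile(
		filepath.Join(outputDir, chunk.Pos.ToFileName()),
		os.O_WRONLY|os.O_CREATE|os.O_APPEND,
		0664,
	)
	if err != nil {
		return err
	}
	defer outfile.Close()

	// One JSON document per chunk file.
	return json.NewEncoder(outfile).Encode(chunk)
}

func main() {
	if err := os.MkdirAll("example-save", 0o755); err != nil {
		log.Fatal(err)
	}
	if err := writeChunk("example-save", ChunkData{Pos: ChunkPos{X: 1, Z: 2}}); err != nil {
		log.Fatal(err)
	}
}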
@@ -13,13 +13,7 @@ type HashServer struct {
func (hs *HashServer) SetStorageRoot(path string) {
	hs.blocks = make(map[world.BlockPos]world.BlockID)

	u, err := storage.OpenUnityFile(path, path+".metadata")
	if err != nil {
		panic(err)
	}
	defer u.Close()

	chunks, err := u.ReadAllChunks()
	chunks, err := storage.ReadParallelFromDirectory(path)
	if err != nil {
		panic(err)
	}
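Both HashServer and InMemoryServer (next hunk) now populate themselves with storage.ReadParallelFromDirectory instead of opening a unity file. That function's body is not part of this diff; the sketch below is only an assumption of what a parallel directory read can look like, again with a stand-in chunk type:

// Sketch: decode every chunk file in a directory concurrently.
// Stand-in ChunkData type; not the repository's implementation.
package main

import (
	"encoding/json"
	"log"
	"os"
	"path/filepath"
	"sync"
)

type ChunkData struct {
	Blocks []int `json:"blocks"`
}

func readParallelFromDirectory(dir string) ([]ChunkData, error) {
	entries, err := os.ReadDir(dir)
	if err != nil {
		return nil, err
	}

	var (
		mu       sync.Mutex
		wg       sync.WaitGroup
		chunks   []ChunkData
		firstErr error
	)

	for _, entry := range entries {
		if entry.IsDir() {
			continue
		}
		wg.Add(1)
		go func(name string) {
			defer wg.Done()

			// Decode one chunk file; collect the result under the mutex.
			f, err := os.Open(filepath.Join(dir, name))
			if err == nil {
				var c ChunkData
				if err = json.NewDecoder(f).Decode(&c); err == nil {
					mu.Lock()
					chunks = append(chunks, c)
					mu.Unlock()
				}
				f.Close()
			}
			if err != nil {
				mu.Lock()
				if firstErr == nil {
					firstErr = err
				}
				mu.Unlock()
			}
		}(entry.Name())
	}

	wg.Wait()
	return chunks, firstErr
}

func main() {
	chunks, err := readParallelFromDirectory("skygrid-save")
	if err != nil {
		log.Fatal(err)
	}
	log.Printf("loaded %d chunks", len(chunks))
}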
@@ -18,13 +18,7 @@ type InMemoryServer struct {
func (s *InMemoryServer) SetStorageRoot(path string) {
	s.StorageDir = path

	u, err := storage.OpenUnityFile(s.StorageDir, s.StorageDir+".metadata")
	if err != nil {
		panic(err)
	}
	defer u.Close()

	chunks, err := u.ReadAllChunks()
	chunks, err := storage.ReadParallelFromDirectory(s.StorageDir)
	if err != nil {
		panic(err)
	}
@@ -14,18 +14,13 @@ import (
const fileCacheSize = 8

type SimpleServer struct {
	StorageDir     string
	storageBackend storage.UnityFile
	StorageDir string
	cache      storage.FileCache
}

func (s *SimpleServer) SetStorageRoot(path string) {
	s.StorageDir = path

	var err error
	s.storageBackend, err = storage.OpenUnityFile(path, path+".metadata")
	if err != nil {
		panic(err)
	}
	s.cache = storage.NewFileCache(256)
}

// Filesystem operations
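SimpleServer trades its UnityFile backend for a storage.FileCache built with NewFileCache. The cache type itself is not shown in this comparison, so the following is only a guess at its shape based on the calls used here (FetchFile keyed by path, with handles later passed to ReadChunkFromFile); treat every name and signature as hypothetical:

// Sketch: a bounded cache of open *os.File handles keyed by path.
// Hypothetical shape; not the repository's storage.FileCache.
package main

import (
	"log"
	"os"
)

type FileCache struct {
	capacity int
	files    map[string]*os.File
}

func NewFileCache(capacity int) FileCache {
	return FileCache{capacity: capacity, files: make(map[string]*os.File)}
}

// FetchFile returns a cached handle for path, opening and caching it on a miss.
// (Not safe for concurrent use; the eviction policy is deliberately naive.)
func (c *FileCache) FetchFile(path string) (*os.File, error) {
	if f, ok := c.files[path]; ok {
		return f, nil
	}
	f, err := os.Open(path)
	if err != nil {
		return nil, err
	}
	if len(c.files) >= c.capacity {
		// Evict an arbitrary handle to stay within the bound.
		for evictedPath, handle := range c.files {
			handle.Close()
			delete(c.files, evictedPath)
			break
		}
	}
	c.files[path] = f
	return f, nil
}

func main() {
	cache := NewFileCache(256)
	if _, err := cache.FetchFile("skygrid-save/some-chunk.json"); err != nil {
		log.Println("cache miss failed to open file:", err)
	}
}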
@@ -63,7 +58,11 @@ func (s *SimpleServer) FetchOrCreateChunk(pos world.ChunkPos) (world.ChunkData,

// `FetchChunk' fetches the chunk's data, given the chunk's position
func (s *SimpleServer) FetchChunk(pos world.ChunkPos) (world.ChunkData, error) {
	chunkData, err := s.storageBackend.ReadChunk(pos)
	chunkFileName := filepath.Join(s.StorageDir, pos.ToFileName())

	var chunkData world.ChunkData

	chunkFile, err := s.cache.FetchFile(chunkFileName)
	if err != nil {
		if errors.Is(err, fs.ErrNotExist) {
			return chunkData, storage.ChunkNotFoundError
@@ -72,7 +71,7 @@ func (s *SimpleServer) FetchChunk(pos world.ChunkPos) (world.ChunkData, error) {
		}
	}

	return chunkData, nil
	return storage.ReadChunkFromFile(chunkFile)
}

// Voxel server implementation
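From a caller's point of view FetchChunk keeps its signature; only the miss case surfaces storage.ChunkNotFoundError. A hedged usage sketch follows; the server and world import paths are assumed (only the storage path appears in this diff), as is the assumption that ChunkNotFoundError is a sentinel error value suitable for errors.Is:

// Sketch: fetching a chunk from a SimpleServer and handling a miss.
// The server and world import paths below are assumptions.
package main

import (
	"errors"
	"log"

	"git.nicholasnovak.io/nnovak/spatial-db/server"
	"git.nicholasnovak.io/nnovak/spatial-db/storage"
	"git.nicholasnovak.io/nnovak/spatial-db/world"
)

func main() {
	var s server.SimpleServer
	s.SetStorageRoot("skygrid-save")

	chunk, err := s.FetchChunk(world.ChunkPos{X: 0, Z: 0})
	switch {
	case errors.Is(err, storage.ChunkNotFoundError):
		log.Println("chunk not generated yet")
	case err != nil:
		log.Fatal(err)
	default:
		log.Printf("loaded chunk at %v", chunk.Pos)
	}
}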
@@ -21,8 +21,8 @@ type UnityFile struct {
}

type fileMetadata struct {
	StartOffset int `json:"start_offset"`
	FileSize    int `json:"file_size"`
	startOffset int
	fileSize    int
}

func CreateUnityFile(fileName string) (UnityFile, error) {
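The unexported startOffset/fileSize variant on one side of this change cannot round-trip through encoding/json: the package only serializes exported fields, which matters if the .metadata sidecar is written with the JSON encoder (the struct tags on the exported variant suggest it is). A minimal illustration with local copies of both shapes:

// Illustration: encoding/json only serializes exported struct fields.
package main

import (
	"encoding/json"
	"fmt"
)

type exportedMetadata struct {
	StartOffset int `json:"start_offset"`
	FileSize    int `json:"file_size"`
}

type unexportedMetadata struct {
	startOffset int
	fileSize    int
}

func main() {
	a, _ := json.Marshal(exportedMetadata{StartOffset: 128, FileSize: 64})
	b, _ := json.Marshal(unexportedMetadata{startOffset: 128, fileSize: 64})

	fmt.Println(string(a)) // {"start_offset":128,"file_size":64}
	fmt.Println(string(b)) // {} (the unexported fields are skipped)
}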
@@ -39,23 +39,6 @@ func CreateUnityFile(fileName string) (UnityFile, error) {
	return u, nil
}

func OpenUnityFile(fileName, metadataName string) (UnityFile, error) {
	var u UnityFile

	// Read the file
	f, err := os.Open(fileName)
	if err != nil {
		return u, err
	}
	u.fd = f

	if err := u.ReadMetadataFile(metadataName); err != nil {
		return u, err
	}

	return u, nil
}

func (u UnityFile) Size() int {
	return u.fileSize
}
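For context, the unity-file read entry point that disappears in this hunk pairs with CreateUnityFile: open the packed data file, then load the offset/size index from the .metadata sidecar. A hedged sketch of how that API is used, based only on the signatures visible in this comparison (OpenUnityFile, ReadAllChunks, Size, Close); it would only build against the tree that still exports these functions:

// Sketch: how the unity-file read path is consumed by the servers.
package main

import (
	"log"

	"git.nicholasnovak.io/nnovak/spatial-db/storage"
)

func main() {
	// A unity file is addressed by its data file plus a ".metadata" sidecar.
	u, err := storage.OpenUnityFile("imperial-save", "imperial-save.metadata")
	if err != nil {
		log.Fatal(err)
	}
	defer u.Close()

	chunks, err := u.ReadAllChunks()
	if err != nil {
		log.Fatal(err)
	}
	log.Printf("unity file holds %d chunks (%d bytes)", len(chunks), u.Size())
}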
@@ -71,7 +54,7 @@ func (u *UnityFile) WriteChunk(data world.ChunkData) error {
	encodedSize := encoded.Len()

	// Go to the end of the file
	u.fd.Seek(int64(u.fileSize), io.SeekStart)
	u.fd.Seek(0, u.fileSize)
	// Write the encoded contents to the file
	if _, err := u.fd.Write(encoded.Bytes()); err != nil {
		return err
@@ -79,8 +62,8 @@ func (u *UnityFile) WriteChunk(data world.ChunkData) error {

	// Update the metadata with the new file
	u.metadata[data.Pos] = fileMetadata{
		StartOffset: u.fileSize,
		FileSize:    encodedSize,
		startOffset: u.fileSize,
		fileSize:    encodedSize,
	}
	u.fileSize += encodedSize
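WriteChunk's packing scheme, as shown in the two hunks above: JSON-encode the chunk, seek to the current end of the data file, append the bytes, then record the start offset and encoded size in the in-memory metadata map. A self-contained sketch of that append-and-index pattern with simplified, hypothetical types (not the repository's implementation):

// Sketch: append a record to a packed file and index it by offset/size.
package main

import (
	"bytes"
	"encoding/json"
	"io"
	"log"
	"os"
)

type recordMeta struct {
	StartOffset int `json:"start_offset"`
	FileSize    int `json:"file_size"`
}

type packedFile struct {
	fd       *os.File
	fileSize int
	metadata map[string]recordMeta // keyed by record name (chunk position in the repo)
}

func (p *packedFile) writeRecord(key string, value any) error {
	var encoded bytes.Buffer
	if err := json.NewEncoder(&encoded).Encode(value); err != nil {
		return err
	}
	encodedSize := encoded.Len()

	// Seek to the current logical end of the file, then append.
	if _, err := p.fd.Seek(int64(p.fileSize), io.SeekStart); err != nil {
		return err
	}
	if _, err := p.fd.Write(encoded.Bytes()); err != nil {
		return err
	}

	// Index the new record so it can be read back without scanning.
	p.metadata[key] = recordMeta{StartOffset: p.fileSize, FileSize: encodedSize}
	p.fileSize += encodedSize
	return nil
}

func main() {
	f, err := os.Create("packed.bin")
	if err != nil {
		log.Fatal(err)
	}
	defer f.Close()

	p := &packedFile{fd: f, metadata: make(map[string]recordMeta)}
	if err := p.writeRecord("chunk_0_0", map[string]int{"block": 2}); err != nil {
		log.Fatal(err)
	}
	log.Printf("wrote %d bytes, index: %+v", p.fileSize, p.metadata)
}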
@@ -117,9 +100,9 @@ func (u *UnityFile) ReadMetadataFile(fileName string) error {
func (u UnityFile) ReadChunk(pos world.ChunkPos) (world.ChunkData, error) {
	m := u.metadata[pos]

	u.fd.Seek(int64(m.StartOffset), io.SeekStart)
	u.fd.Seek(0, m.startOffset)

	fileReader := io.LimitReader(u.fd, int64(m.FileSize))
	fileReader := io.LimitReader(u.fd, int64(m.fileSize))

	var data world.ChunkData
	if err := json.NewDecoder(fileReader).Decode(&data); err != nil {
@@ -128,21 +111,3 @@ func (u UnityFile) ReadChunk(pos world.ChunkPos) (world.ChunkData, error) {

	return data, nil
}

func (u UnityFile) ReadAllChunks() ([]world.ChunkData, error) {
	chunks := []world.ChunkData{}

	for pos := range u.metadata {
		chunk, err := u.ReadChunk(pos)
		if err != nil {
			return nil, err
		}
		chunks = append(chunks, chunk)
	}

	return chunks, nil
}

func (u *UnityFile) Close() error {
	return u.fd.Close()
}
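The read path mirrors the write path: seek to the stored start offset and let io.LimitReader cap the decoder at the record's size. A small stand-alone sketch of the same pattern; the packed.bin file and recordMeta shape are carried over from the previous sketch and are illustrative only:

// Sketch: read one JSON record back from a packed file by offset and size.
package main

import (
	"encoding/json"
	"fmt"
	"io"
	"log"
	"os"
)

type recordMeta struct {
	StartOffset int
	FileSize    int
}

func readRecord(fd *os.File, m recordMeta, out any) error {
	// Position the file at the record's start...
	if _, err := fd.Seek(int64(m.StartOffset), io.SeekStart); err != nil {
		return err
	}
	// ...and let the decoder see only this record's bytes.
	return json.NewDecoder(io.LimitReader(fd, int64(m.FileSize))).Decode(out)
}

func main() {
	f, err := os.Open("packed.bin") // written by the previous sketch
	if err != nil {
		log.Fatal(err)
	}
	defer f.Close()

	// Offsets and sizes would normally come from the metadata index; the
	// first record starts at offset 0, and here it spans the whole file.
	info, err := f.Stat()
	if err != nil {
		log.Fatal(err)
	}

	var record map[string]int
	if err := readRecord(f, recordMeta{StartOffset: 0, FileSize: int(info.Size())}, &record); err != nil {
		log.Fatal(err)
	}
	fmt.Println(record)
}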
@@ -1,7 +1,6 @@
package storage

import (
	"fmt"
	"os"
	"path"
	"reflect"
@@ -70,7 +69,6 @@ func TestWriteMultipleFiles(t *testing.T) {
	if err != nil {
		t.Fatalf("Error creating temporary directory: %v", err)
	}
	fmt.Println(tempDir)
	defer os.RemoveAll(tempDir)

	u, err := CreateUnityFile(path.Join(tempDir, "test-unity"))
@@ -121,60 +119,3 @@ func TestWriteMultipleFiles(t *testing.T) {
		}
	}
}

func TestReadAllChunks(t *testing.T) {
	tempDir, err := os.MkdirTemp("", "unity")
	if err != nil {
		t.Fatalf("Error creating temporary directory: %v", err)
	}
	fmt.Println(tempDir)
	defer os.RemoveAll(tempDir)

	u, err := CreateUnityFile(path.Join(tempDir, "test-unity"))
	if err != nil {
		t.Fatalf("Error creating unity file: %v", err)
	}

	var (
		chunk1 world.ChunkData
		chunk2 world.ChunkData
		chunk3 world.ChunkData
	)
	chunk1.Pos = world.ChunkPos{
		X: 0,
		Z: 0,
	}
	chunk1.Sections[0].BlockStates[0] = 2
	chunk2.Sections[0].BlockStates[0] = 3
	chunk2.Pos = world.ChunkPos{
		X: 1,
		Z: 0,
	}
	chunk3.Sections[0].BlockStates[0] = 4
	chunk3.Pos = world.ChunkPos{
		X: 2,
		Z: 0,
	}

	chunks := []world.ChunkData{chunk1, chunk2, chunk3}

	// Write all chunks
	for _, data := range chunks {
		if err := u.WriteChunk(data); err != nil {
			t.Fatalf("Error writing chunk: %v", err)
		}
	}

	t.Log(chunks)

	readChunks, err := u.ReadAllChunks()
	if err != nil {
		t.Fatalf("Error reading chunks: %v", err)
	}

	for index, chunk := range readChunks {
		if !reflect.DeepEqual(chunk, chunks[index]) {
			t.Fatalf("Chunks were not equal")
		}
	}
}
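The removed TestReadAllChunks exercised the write-then-read-everything round trip. Without ReadAllChunks, an equivalent check has to read each chunk back by position. A hedged sketch of such a test written against the unity-file API above; the world import path is assumed (only the storage path appears in this diff), and the DeepEqual round-trip property is the same one the removed test relied on:

// Sketch: write a chunk into a unity file and read it back by position.
package storage

import (
	"os"
	"path"
	"reflect"
	"testing"

	// Assumed import path for the repository's world package.
	"git.nicholasnovak.io/nnovak/spatial-db/world"
)

func TestWriteReadChunkRoundTrip(t *testing.T) {
	tempDir, err := os.MkdirTemp("", "unity")
	if err != nil {
		t.Fatalf("Error creating temporary directory: %v", err)
	}
	defer os.RemoveAll(tempDir)

	u, err := CreateUnityFile(path.Join(tempDir, "test-unity"))
	if err != nil {
		t.Fatalf("Error creating unity file: %v", err)
	}
	defer u.Close()

	var chunk world.ChunkData
	chunk.Pos = world.ChunkPos{X: 1, Z: 0}
	chunk.Sections[0].BlockStates[0] = 3

	if err := u.WriteChunk(chunk); err != nil {
		t.Fatalf("Error writing chunk: %v", err)
	}

	readBack, err := u.ReadChunk(chunk.Pos)
	if err != nil {
		t.Fatalf("Error reading chunk: %v", err)
	}
	if !reflect.DeepEqual(chunk, readBack) {
		t.Fatalf("Chunks were not equal")
	}
}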