Skip to content
This repository was archived by the owner on Aug 2, 2021. It is now read-only.

Commit 3eff652

Browse files
holisticode authored and nonsense committed
swarm/storage: Get all chunk references for a given file (#19002)
1 parent 75c9570 commit 3eff652

File tree

2 files changed

+76
-0
lines changed

2 files changed

+76
-0
lines changed

swarm/storage/filestore.go

+40
Original file line numberDiff line numberDiff line change
@@ -19,6 +19,7 @@ package storage
1919
import (
	"context"
	"io"
	"sort"
	"sync"
)
2324

2425
/*
@@ -96,3 +97,42 @@ func (f *FileStore) Store(ctx context.Context, data io.Reader, size int64, toEnc
9697
func (f *FileStore) HashSize() int {
9798
return f.hashFunc().Size()
9899
}
100+
101+
// Public API. This endpoint returns all chunk hashes (only) for a given file
102+
func (f *FileStore) GetAllReferences(ctx context.Context, data io.Reader, toEncrypt bool) (addrs AddressCollection, err error) {
103+
// create a special kind of putter, which only will store the references
104+
putter := &HashExplorer{
105+
hasherStore: NewHasherStore(f.ChunkStore, f.hashFunc, toEncrypt),
106+
References: make([]Reference, 0),
107+
}
108+
// do the actual splitting anyway, no way around it
109+
_, _, err = PyramidSplit(ctx, data, putter, putter)
110+
if err != nil {
111+
return nil, err
112+
}
113+
// collect all references
114+
addrs = NewAddressCollection(0)
115+
for _, ref := range putter.References {
116+
addrs = append(addrs, Address(ref))
117+
}
118+
sort.Sort(addrs)
119+
return addrs, nil
120+
}
121+
122+
// HashExplorer is a special kind of putter which will only store chunk references
123+
type HashExplorer struct {
124+
*hasherStore
125+
References []Reference
126+
}
127+
128+
// HashExplorer's Put will add just the chunk hashes to its `References`
129+
func (he *HashExplorer) Put(ctx context.Context, chunkData ChunkData) (Reference, error) {
130+
// Need to do the actual Put, which returns the references
131+
ref, err := he.hasherStore.Put(ctx, chunkData)
132+
if err != nil {
133+
return nil, err
134+
}
135+
// internally store the reference
136+
he.References = append(he.References, ref)
137+
return ref, nil
138+
}

swarm/storage/filestore_test.go

+36
Original file line numberDiff line numberDiff line change
@@ -173,3 +173,39 @@ func testFileStoreCapacity(toEncrypt bool, t *testing.T) {
173173
t.Fatalf("Comparison error after clearing memStore.")
174174
}
175175
}
176+
177+
// TestGetAllReferences only tests that GetAllReferences returns an expected
178+
// number of references for a given file
179+
func TestGetAllReferences(t *testing.T) {
180+
tdb, cleanup, err := newTestDbStore(false, false)
181+
defer cleanup()
182+
if err != nil {
183+
t.Fatalf("init dbStore failed: %v", err)
184+
}
185+
db := tdb.LDBStore
186+
memStore := NewMemStore(NewDefaultStoreParams(), db)
187+
localStore := &LocalStore{
188+
memStore: memStore,
189+
DbStore: db,
190+
}
191+
fileStore := NewFileStore(localStore, NewFileStoreParams())
192+
193+
checkRefs := func(dataSize int, expectedLen int) {
194+
slice := testutil.RandomBytes(1, dataSize)
195+
196+
addrs, err := fileStore.GetAllReferences(context.Background(), bytes.NewReader(slice), false)
197+
if err != nil {
198+
t.Fatal(err)
199+
}
200+
if len(addrs) != expectedLen {
201+
t.Fatalf("Expected reference array length to be %d, but is %d", expectedLen, len(addrs))
202+
}
203+
}
204+
205+
// testRuns[i] and expectedLen[i] are dataSize and expected length respectively
206+
testRuns := []int{1024, 8192, 16000, 30000, 1000000}
207+
expectedLens := []int{1, 3, 5, 9, 248}
208+
for i, r := range testRuns {
209+
checkRefs(r, expectedLens[i])
210+
}
211+
}

0 commit comments

Comments
 (0)