3
3
4
4
package rocksdb
5
5
6
- /*
7
- #include <stdlib.h>
8
- #include "rocksdb/c.h"
9
-
10
- typedef int * intp;
11
-
12
- extern void replay_iterate(int *wp, void *bp);
13
- */
6
+ // #include <stdlib.h>
7
+ // #include "rocksdb/c.h"
14
8
import "C"
15
9
16
10
import (
@@ -192,7 +186,7 @@ func (db *RDBDatabase) Meter(prefix string) {
192
186
193
187
func (db * RDBDatabase ) NewBatch () ethdb.Batch {
194
188
b := C .rocksdb_writebatch_create ()
195
- bb := & rdbBatch {db : db .db , b : b , wopts : db .wopts }
189
+ bb := & rdbBatch {db : db .db , b : b , wopts : db .wopts , data : nil }
196
190
runtime .SetFinalizer (bb , func (bb * rdbBatch ) {
197
191
if bb .b != nil {
198
192
C .rocksdb_writebatch_destroy (bb .b )
@@ -202,10 +196,17 @@ func (db *RDBDatabase) NewBatch() ethdb.Batch {
202
196
return bb
203
197
}
204
198
199
// rdbBatchOp records a single buffered batch operation (a put or a
// delete) so the batch can later be replayed against another writer.
type rdbBatchOp struct {
	del   bool   // true for a delete, false for a put
	key   []byte // key the operation applies to
	value []byte // value for a put; nil for a delete
}
204
+
205
205
type rdbBatch struct {
206
206
db * C.rocksdb_t
207
207
b * C.rocksdb_writebatch_t
208
208
wopts * C.rocksdb_writeoptions_t
209
+ data []* rdbBatchOp
209
210
size int
210
211
}
211
212
@@ -216,6 +217,7 @@ func (b *rdbBatch) Put(key, value []byte) error {
216
217
}
217
218
ck , cv := b2c (key ), b2c (value )
218
219
C .rocksdb_writebatch_put (b .b , ck , C .size_t (len (key )), cv , C .size_t (len (value )))
220
+ b .data = append (b .data , & rdbBatchOp {del : false , key : key , value : value })
219
221
b .size += len (value )
220
222
return nil
221
223
}
@@ -225,6 +227,7 @@ func (b *rdbBatch) Delete(key []byte) error {
225
227
atomic .AddUint64 (& _d_count , 1 )
226
228
}
227
229
C .rocksdb_writebatch_delete (b .b , b2c (key ), C .size_t (len (key )))
230
+ b .data = append (b .data , & rdbBatchOp {del : true , key : key , value : nil })
228
231
b .size += 1
229
232
return nil
230
233
}
@@ -241,60 +244,20 @@ func (b *rdbBatch) ValueSize() int {
241
244
242
245
// Reset discards all buffered operations, returning the batch to an
// empty state so it can be reused.
func (b *rdbBatch) Reset() {
	// Clear the native batch first, then drop the Go-side bookkeeping.
	C.rocksdb_writebatch_clear(b.b)
	b.size = 0
	b.data = nil
}
246
250
247
- //export replayPut
248
- func replayPut (ptr * C.char , k * C.char , klen C.size_t , v * C.char , vlen C.size_t ) {
249
- w := (* ethdb .KeyValueWriter )(unsafe .Pointer (ptr ))
250
- if w != nil {
251
- (* w ).Put (C .GoBytes (unsafe .Pointer (k ), C .int (klen )), C .GoBytes (unsafe .Pointer (v ), C .int (vlen )))
252
- }
253
- }
254
-
255
- //export replayDel
256
- func replayDel (ptr * C.char , k * C.char , klen C.size_t ) {
257
- w := (* ethdb .KeyValueWriter )(unsafe .Pointer (ptr ))
258
- if w != nil {
259
- (* w ).Delete (C .GoBytes (unsafe .Pointer (k ), C .int (klen )))
260
- }
261
- }
262
-
263
251
// Replay replays the batch contents.
264
252
func (b * rdbBatch ) Replay (w ethdb.KeyValueWriter ) error {
265
- repsize := C .size_t (0 )
266
- rep := C .rocksdb_writebatch_data (b .b , & repsize )
267
- b2 := C .rocksdb_writebatch_create_from (rep , repsize )
268
- bp := unsafe .Pointer (b2 )
269
- wp := (C .intp )(unsafe .Pointer (& w ))
270
-
271
- C .replay_iterate (wp , bp )
272
- C .rocksdb_writebatch_destroy (b2 )
273
- return nil
274
- }
275
-
276
- // replayer is a small wrapper to implement the correct replay methods.
277
- type replayer struct {
278
- writer ethdb.KeyValueWriter
279
- failure error
280
- }
281
-
282
- // Put inserts the given value into the key-value data store.
283
- func (r * replayer ) Put (key , value []byte ) {
284
- // If the replay already failed, stop executing ops
285
- if r .failure != nil {
286
- return
287
- }
288
- r .failure = r .writer .Put (key , value )
289
- }
290
-
291
- // Delete removes the key from the key-value data store.
292
- func (r * replayer ) Delete (key []byte ) {
293
- // If the replay already failed, stop executing ops
294
- if r .failure != nil {
295
- return
253
+ for _ , i := range b .data {
254
+ if i .del {
255
+ w .Delete (i .key )
256
+ } else {
257
+ w .Put (i .key , i .value )
258
+ }
296
259
}
297
- r . failure = r . writer . Delete ( key )
260
+ return nil
298
261
}
299
262
300
263
func EnableStats (b bool ) {
0 commit comments