@@ -36,12 +36,13 @@ type diskLayer struct {
 	db     *Database        // Path-based trie database
 	cleans *fastcache.Cache // GC friendly memory cache of clean node RLPs
 	buffer *nodebuffer      // Node buffer to aggregate writes
+	frozen *nodebuffer      // Frozen node buffer waiting for flushing
 	stale  bool             // Signals that the layer became stale (state progressed)
 	lock   sync.RWMutex     // Lock used to protect stale flag
 }
 
 // newDiskLayer creates a new disk layer based on the passing arguments.
-func newDiskLayer(root common.Hash, id uint64, db *Database, cleans *fastcache.Cache, buffer *nodebuffer) *diskLayer {
+func newDiskLayer(root common.Hash, id uint64, db *Database, cleans *fastcache.Cache, buffer *nodebuffer, frozen *nodebuffer) *diskLayer {
 	// Initialize a clean cache if the memory allowance is not zero
 	// or reuse the provided cache if it is not nil (inherited from
 	// the original disk layer).
@@ -54,6 +55,7 @@ func newDiskLayer(root common.Hash, id uint64, db *Database, cleans *fastcache.C
 		db:     db,
 		cleans: cleans,
 		buffer: buffer,
+		frozen: frozen,
 	}
 }
 
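Editor's note: the hunks above split the dirty state into a live `buffer` and a `frozen` buffer that gets flushed in the background. The sketch below is a minimal, self-contained illustration of the flushing contract the rest of the diff relies on: `flush` schedules an asynchronous write, `waitFlush` blocks until it completes, and `full` gates when a buffer should be frozen. Only those method names are taken from the diff; the map-based storage, the `persist` callback, and all internals are assumptions for illustration, not the real `nodebuffer`.

```go
// asyncBuffer is a simplified stand-in for a node buffer that can be frozen
// and flushed in the background. It is a sketch of the contract, not the
// actual pathdb implementation.
package main

import (
	"errors"
	"fmt"
	"sync"
)

type asyncBuffer struct {
	mu      sync.Mutex
	nodes   map[string][]byte // dirty trie nodes keyed by owner+path (simplified)
	limit   int               // size threshold beyond which the buffer is "full"
	done    chan struct{}     // closed once the background flush has completed
	flushed error             // error produced by the background flush, if any
}

func newAsyncBuffer(limit int) *asyncBuffer {
	return &asyncBuffer{nodes: make(map[string][]byte), limit: limit}
}

// full reports whether the buffered data exceeds the configured threshold.
func (b *asyncBuffer) full() bool {
	b.mu.Lock()
	defer b.mu.Unlock()
	size := 0
	for _, blob := range b.nodes {
		size += len(blob)
	}
	return size >= b.limit
}

// flush schedules the buffered nodes to be written out in the background.
// persist stands in for the real database write (a rawdb batch in the diff).
func (b *asyncBuffer) flush(persist func(map[string][]byte) error) {
	b.done = make(chan struct{})
	go func() {
		defer close(b.done)
		b.mu.Lock()
		defer b.mu.Unlock()
		b.flushed = persist(b.nodes)
	}()
}

// waitFlush blocks until the scheduled background flush has completed and
// returns its error, if any.
func (b *asyncBuffer) waitFlush() error {
	if b.done == nil {
		return errors.New("buffer not frozen")
	}
	<-b.done
	return b.flushed
}

func main() {
	buf := newAsyncBuffer(1024)
	buf.nodes["acct/00"] = []byte{0x01}
	buf.flush(func(nodes map[string][]byte) error {
		fmt.Println("persisting", len(nodes), "nodes in the background")
		return nil
	})
	if err := buf.waitFlush(); err != nil {
		fmt.Println("flush failed:", err)
	}
}
```

The point of the design is that a commit never blocks on disk I/O unless a second flush is requested before the previous one has finished, or synchronous persistence is explicitly required (see the commit hunk further down).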
@@ -102,16 +104,19 @@ func (dl *diskLayer) node(owner common.Hash, path []byte, depth int) ([]byte, co
 	if dl.stale {
 		return nil, common.Hash{}, nil, errSnapshotStale
 	}
-	// Try to retrieve the trie node from the not-yet-written
-	// node buffer first. Note the buffer is lock free since
-	// it's impossible to mutate the buffer before tagging the
-	// layer as stale.
-	n, found := dl.buffer.node(owner, path)
-	if found {
-		dirtyHitMeter.Mark(1)
-		dirtyReadMeter.Mark(int64(len(n.Blob)))
-		dirtyNodeHitDepthHist.Update(int64(depth))
-		return n.Blob, n.Hash, &nodeLoc{loc: locDirtyCache, depth: depth}, nil
+	// Try to retrieve the trie node from the not-yet-written node buffer first
+	// (both the live one and the frozen one). Note the buffer is lock free since
+	// it's impossible to mutate the buffer before tagging the layer as stale.
+	for _, buffer := range []*nodebuffer{dl.buffer, dl.frozen} {
+		if buffer != nil {
+			n, found := buffer.node(owner, path)
+			if found {
+				dirtyHitMeter.Mark(1)
+				dirtyReadMeter.Mark(int64(len(n.Blob)))
+				dirtyNodeHitDepthHist.Update(int64(depth))
+				return n.Blob, n.Hash, &nodeLoc{loc: locDirtyCache, depth: depth}, nil
+			}
+		}
 	}
 	dirtyMissMeter.Mark(1)
@@ -135,6 +140,11 @@ func (dl *diskLayer) node(owner common.Hash, path []byte, depth int) ([]byte, co
 	} else {
 		blob = rawdb.ReadStorageTrieNode(dl.db.diskdb, owner, path)
 	}
+	// Store the resolved data in the clean cache. The background buffer flusher
+	// may also write to the clean cache concurrently, but two writers cannot
+	// write the same item with different content. If the item already exists,
+	// it will be found in the frozen buffer, eliminating the need to check the
+	// database.
 	if dl.cleans != nil && len(blob) > 0 {
 		dl.cleans.Set(key, blob)
 		cleanWriteMeter.Mark(int64(len(blob)))
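Editor's note: taken together, the two hunks above define the read path as live buffer, then frozen buffer, then clean cache, then database, with a database miss populated back into the clean cache. The sketch below illustrates that order and why the concurrent clean-cache write mentioned in the new comment is benign; plain maps and a callback stand in for the real fastcache/nodebuffer/database types, which is an assumption for illustration only.

```go
// readSketch mirrors the lookup order after the change: live buffer, frozen
// buffer, clean cache, database. A key still held by the frozen buffer is
// returned before the database is ever consulted, so the only concurrent
// writers of the clean cache (this miss path and the background flusher)
// always store identical bytes for a given key, making the write race harmless.
func readSketch(live, frozen, cleans map[string][]byte, disk func(string) []byte, key string) []byte {
	for _, buffer := range []map[string][]byte{live, frozen} {
		if buffer == nil {
			continue
		}
		if blob, ok := buffer[key]; ok {
			return blob // dirty hit, either live or frozen
		}
	}
	if blob, ok := cleans[key]; ok {
		return blob // clean cache hit
	}
	blob := disk(key)
	if len(blob) > 0 {
		cleans[key] = blob // may race with the background flusher, but with equal content
	}
	return blob
}
```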
@@ -182,29 +192,51 @@ func (dl *diskLayer) commit(bottom *diffLayer, force bool) (*diskLayer, error) {
 	// Mark the diskLayer as stale before applying any mutations on top.
 	dl.stale = true
 
-	// Store the root->id lookup afterwards. All stored lookups are identified
+	// Store the root->id lookup afterward. All stored lookups are identified
 	// by the **unique** state root. It's impossible that in the same chain
 	// blocks are not adjacent but have the same root.
 	if dl.id == 0 {
 		rawdb.WriteStateID(dl.db.diskdb, dl.root, 0)
 	}
 	rawdb.WriteStateID(dl.db.diskdb, bottom.rootHash(), bottom.stateID())
 
-	// Construct a new disk layer by merging the nodes from the provided diff
-	// layer, and flush the content in disk layer if there are too many nodes
-	// cached. The clean cache is inherited from the original disk layer.
-	ndl := newDiskLayer(bottom.root, bottom.stateID(), dl.db, dl.cleans, dl.buffer.commit(bottom.nodes))
-
 	// In a unique scenario where the ID of the oldest history object (after tail
 	// truncation) surpasses the persisted state ID, we take the necessary action
-	// of forcibly committing the cached dirty nodes to ensure that the persisted
+	// of forcibly committing the cached dirty states to ensure that the persisted
 	// state ID remains higher.
-	if !force && rawdb.ReadPersistentStateID(dl.db.diskdb) < oldest {
+	persistedID := rawdb.ReadPersistentStateID(dl.db.diskdb)
+	if !force && persistedID < oldest {
 		force = true
 	}
-	if err := ndl.buffer.flush(ndl.db.diskdb, ndl.cleans, ndl.id, force); err != nil {
-		return nil, err
+	// Merge the nodes of the bottom-most diff layer into the buffer as the combined one
+	combined := dl.buffer.commit(bottom.nodes)
+	if combined.full() || force {
+		// Wait until the previous frozen buffer is fully flushed
+		if dl.frozen != nil {
+			if err := dl.frozen.waitFlush(); err != nil {
+				return nil, err
+			}
+		}
+		dl.frozen = nil
+
+		// Freeze the live buffer and schedule background flushing
+		dl.frozen = combined
+		dl.frozen.flush(dl.db.diskdb, dl.cleans, bottom.stateID())
+
+		// Block until the frozen buffer is fully flushed out if the oldest history
+		// surpasses the persisted state ID.
+		if persistedID < oldest {
+			if err := dl.frozen.waitFlush(); err != nil {
+				return nil, err
+			}
+		}
+		combined = newNodeBuffer(dl.db.bufferSize, nil, 0)
 	}
+	// Construct a new disk layer by merging the nodes from the provided diff
+	// layer, and flush the content in disk layer if there are too many nodes
+	// cached. The clean cache is inherited from the original disk layer.
+	ndl := newDiskLayer(bottom.root, bottom.stateID(), dl.db, dl.cleans, combined, dl.frozen)
+
 	// To remove outdated history objects from the end, we set the 'tail' parameter
 	// to 'oldest-1' due to the offset between the freezer index and the history ID.
 	if overflow {
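Editor's note: the hunk above is the heart of the change. Instead of flushing synchronously on every commit, the combined buffer is frozen and handed to a background flush once it is full (or a flush is forced), and the commit only blocks when a previous flush is still in flight or the history tail requires the persisted state to catch up. Below is a condensed sketch of that control flow, reusing the `asyncBuffer` type from the earlier sketch; the real code operates on `nodebuffer` and `rawdb`, so everything beyond the branch structure is assumed.

```go
// commitSketch condenses the decision flow of the new commit path. It returns
// the next live buffer and the (possibly new) frozen buffer.
func commitSketch(live, frozen *asyncBuffer, diffNodes map[string][]byte,
	persist func(map[string][]byte) error,
	persistedID, oldest uint64, force bool, bufferSize int) (*asyncBuffer, *asyncBuffer, error) {

	// Force a flush if the oldest retained history would otherwise outrun the
	// persisted state ID.
	if !force && persistedID < oldest {
		force = true
	}
	// Merge the bottom-most diff layer into the live buffer (dl.buffer.commit).
	live.mu.Lock()
	for k, v := range diffNodes {
		live.nodes[k] = v
	}
	live.mu.Unlock()

	combined := live
	if combined.full() || force {
		// Only one flush may be in flight: wait for the previous frozen buffer.
		if frozen != nil {
			if err := frozen.waitFlush(); err != nil {
				return nil, nil, err
			}
		}
		// Freeze the combined buffer and flush it in the background.
		frozen = combined
		frozen.flush(persist)

		// Block until the flush completes if the persisted state must not fall
		// behind the oldest retained history.
		if persistedID < oldest {
			if err := frozen.waitFlush(); err != nil {
				return nil, nil, err
			}
		}
		// The next disk layer starts with an empty live buffer.
		combined = newAsyncBuffer(bufferSize)
	}
	return combined, frozen, nil
}
```

Note that the `force` upgrade and the later `persistedID < oldest` check mirror the diff: even with a background flusher, truncating the history tail is only safe once the persisted state ID has caught up with `oldest`, which is why that particular case degrades to a synchronous flush.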
@@ -249,25 +281,23 @@ func (dl *diskLayer) revert(h *history) (*diskLayer, error) {
 			return nil, err
 		}
 	} else {
+		// Block until the frozen buffer is fully flushed
+		if dl.frozen != nil {
+			if err := dl.frozen.waitFlush(); err != nil {
+				return nil, err
+			}
+			// Unset the frozen buffer if it exists, otherwise these "reverted"
+			// states will still be accessible after revert in frozen buffer.
+			dl.frozen = nil
+		}
 		batch := dl.db.diskdb.NewBatch()
 		writeNodes(batch, nodes, dl.cleans)
 		rawdb.WritePersistentStateID(batch, dl.id-1)
 		if err := batch.Write(); err != nil {
 			log.Crit("Failed to write states", "err", err)
 		}
 	}
-	return newDiskLayer(h.meta.parent, dl.id-1, dl.db, dl.cleans, dl.buffer), nil
-}
-
-// setBufferSize sets the node buffer size to the provided value.
-func (dl *diskLayer) setBufferSize(size int) error {
-	dl.lock.RLock()
-	defer dl.lock.RUnlock()
-
-	if dl.stale {
-		return errSnapshotStale
-	}
-	return dl.buffer.setSize(size, dl.db.diskdb, dl.cleans, dl.id)
+	return newDiskLayer(h.meta.parent, dl.id-1, dl.db, dl.cleans, dl.buffer, dl.frozen), nil
 }
 
 // size returns the approximate size of cached nodes in the disk layer.
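Editor's note: in the revert path the ordering matters. Any in-flight flush is drained first, the frozen buffer is then dropped so the reverted states can no longer be served from it, and only afterwards are the reverted nodes written to disk. A minimal sketch of that ordering, again built on the `asyncBuffer` type from above, with the batch write reduced to a callback; this is an assumption-laden illustration, not the real revert code.

```go
// revertSketch mirrors the ordering enforced by the revert hunk: drain the
// pending background flush, drop the frozen buffer, then persist the reverted
// nodes. It returns the (now cleared) frozen buffer.
func revertSketch(frozen *asyncBuffer, reverted map[string][]byte,
	persist func(map[string][]byte) error) (*asyncBuffer, error) {

	if frozen != nil {
		// Let the pending flush finish before touching the database.
		if err := frozen.waitFlush(); err != nil {
			return frozen, err
		}
		// Drop the frozen buffer so reverted states can no longer be read from it.
		frozen = nil
	}
	// Persist the reverted nodes (a write batch in the real code).
	return frozen, persist(reverted)
}
```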