@@ -7,7 +7,7 @@ use crate::{
     metrics::{COUNTER, TIMER},
 };
 use aptos_indexer_grpc_utils::file_store_operator_v2::file_store_reader::FileStoreReader;
-use aptos_protos::indexer::v1::{GetTransactionsRequest, TransactionsResponse};
+use aptos_protos::indexer::v1::{GetTransactionsRequest, ProcessedRange, TransactionsResponse};
 use aptos_transaction_filter::BooleanTransactionFilter;
 use futures::executor::block_on;
 use std::{
@@ -167,14 +167,21 @@ impl HistoricalDataService {
                     /*retries=*/ 3,
                     /*max_files=*/ None,
                     filter,
+                    Some(ending_version),
                     tx,
                 )
                 .await;
         });

         let mut close_to_latest = false;
-        while let Some((transactions, batch_size_bytes, timestamp)) = rx.recv().await {
-            next_version += transactions.len() as u64;
+        while let Some((
+            transactions,
+            batch_size_bytes,
+            timestamp,
+            (first_processed_version, last_processed_version),
+        )) = rx.recv().await
+        {
+            next_version = last_processed_version + 1;
             size_bytes += batch_size_bytes as u64;
             let timestamp_since_epoch =
                 Duration::new(timestamp.seconds as u64, timestamp.nanos as u32);
@@ -186,28 +193,55 @@ impl HistoricalDataService {
                 close_to_latest = true;
             }

-            if !transactions.is_empty() {
-                let responses =
-                    transactions
-                        .chunks(max_num_transactions_per_batch)
-                        .map(|chunk| TransactionsResponse {
+            let responses = if !transactions.is_empty() {
+                let mut current_version = first_processed_version;
+                let mut responses: Vec<_> = transactions
+                    .chunks(max_num_transactions_per_batch)
+                    .map(|chunk| {
+                        let first_version = current_version;
+                        let last_version = chunk.last().unwrap().version;
+                        current_version = last_version + 1;
+                        TransactionsResponse {
                             transactions: chunk.to_vec(),
                             chain_id: Some(self.chain_id),
-                        });
-                for response in responses {
-                    let _timer = TIMER
-                        .with_label_values(&["historical_data_service_send_batch"])
-                        .start_timer();
-                    if response_sender.send(Ok(response)).await.is_err() {
-                        // NOTE: We are not recalculating the version and size_bytes for the stream
-                        // progress since nobody cares about the accurate if client has dropped the
-                        // connection.
-                        info!(stream_id = id, "Client dropped.");
-                        COUNTER
-                            .with_label_values(&["historical_data_service_client_dropped"])
-                            .inc();
-                        break 'out;
-                    }
+                            processed_range: Some(ProcessedRange {
+                                first_version,
+                                last_version,
+                            }),
+                        }
+                    })
+                    .collect();
+                responses
+                    .last_mut()
+                    .unwrap()
+                    .processed_range
+                    .as_mut().unwrap()
+                    .last_version = last_processed_version;
+                responses
+            } else {
+                vec![TransactionsResponse {
+                    transactions: vec![],
+                    chain_id: Some(self.chain_id),
+                    processed_range: Some(ProcessedRange {
+                        first_version: first_processed_version,
+                        last_version: last_processed_version,
+                    }),
+                }]
+            };
+
+            for response in responses {
+                let _timer = TIMER
+                    .with_label_values(&["historical_data_service_send_batch"])
+                    .start_timer();
+                if response_sender.send(Ok(response)).await.is_err() {
+                    // NOTE: We are not recalculating the version and size_bytes for the stream
+                    // progress since nobody cares about accuracy if the client has dropped the
+                    // connection.
+                    info!(stream_id = id, "Client dropped.");
+                    COUNTER
+                        .with_label_values(&["historical_data_service_client_dropped"])
+                        .inc();
+                    break 'out;
                 }
             }
         }
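
For context on the change above: each response batch now carries a `ProcessedRange`, and the ranges are stitched together from a running version cursor rather than from the chunk contents alone. Below is a minimal standalone sketch of that bookkeeping, under stated assumptions: `Txn`, `Range`, and `split_with_ranges` are hypothetical stand-ins for the proto `Transaction`, `ProcessedRange`, and the inline closure in the diff, not code from the repository.

// Standalone sketch of the range bookkeeping in the diff above.
// `Txn`, `Range`, and `split_with_ranges` are hypothetical stand-ins.

#[derive(Clone)]
struct Txn {
    version: u64,
}

struct Range {
    first_version: u64,
    last_version: u64,
}

// Split `txns` into chunks of at most `max_per_batch`, tagging each chunk
// with the inclusive version range it accounts for. `first_processed_version`
// and `last_processed_version` bound everything the server scanned, which may
// exceed the versions present in `txns` when a filter dropped transactions.
fn split_with_ranges(
    txns: &[Txn],
    max_per_batch: usize,
    first_processed_version: u64,
    last_processed_version: u64,
) -> Vec<(Vec<Txn>, Range)> {
    assert!(!txns.is_empty(), "the empty case is handled separately upstream");
    let mut current_version = first_processed_version;
    let mut batches: Vec<(Vec<Txn>, Range)> = txns
        .chunks(max_per_batch)
        .map(|chunk| {
            // Start the range at the running cursor, not at the chunk's first
            // transaction, so versions filtered out between chunks are still
            // covered by exactly one range.
            let first_version = current_version;
            let last_version = chunk.last().unwrap().version;
            current_version = last_version + 1;
            (chunk.to_vec(), Range { first_version, last_version })
        })
        .collect();
    // Extend the final range to the end of the scanned window, covering any
    // trailing versions that were filtered out.
    batches.last_mut().unwrap().1.last_version = last_processed_version;
    batches
}

fn main() {
    // The server scanned versions 10..=20, but only 10, 12, 13, and 14
    // survived the filter.
    let txns: Vec<Txn> = [10, 12, 13, 14].map(|version| Txn { version }).into();
    for (batch, range) in split_with_ranges(&txns, 2, 10, 20) {
        println!(
            "{} txns cover versions {}..={}",
            batch.len(),
            range.first_version,
            range.last_version
        );
    }
    // Output: ranges 10..=12 and 13..=20, contiguous and gap-free.
}

The point of the cursor, and of patching the last range up to `last_processed_version`, is that server-side filtering can drop transactions anywhere in the scanned window. Deriving ranges purely from chunk contents would leave gaps; with the cursor, the reported ranges tile the window exactly, so a client can always resume from `last_version + 1` of the final batch.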