@@ -612,43 +612,7 @@ fn not_enough_bytes<T>(_: T) -> Error {
612
612
/// Fast path for reading a `Vec` of plain integers.
///
/// Tries to downcast the output `Vec` to each candidate integer type in
/// turn; the first type that matches takes the bulk-read fast path via
/// `read_vec_fast_int`, otherwise control falls through to the
/// caller-supplied `else` body.
macro_rules! vec_fast_int {
    ( try ( $( $Ty:ty )+ ) using ( $list:expr, $reader:expr, $endian:expr, $count:expr ) else { $( $else:tt )* } ) => {
        $( if let Some(typed_list) = <dyn core::any::Any>::downcast_mut::<Vec<$Ty>>(&mut $list) {
            read_vec_fast_int($reader, $count, $endian, typed_list)?;
            Ok($list)
        } else )* {
            $( $else )*
        }
    };
}
use vec_fast_int;
624
/// Byte-order reversal, abstracted over the integer types that
/// `vec_fast_int!` can fast-path, so `read_vec_fast_int` can stay generic.
trait SwapBytes {
    /// Returns `self` with its bytes in reverse order.
    fn swap_bytes(self) -> Self;
}

/// Forwards `SwapBytes::swap_bytes` to each listed integer type's
/// inherent `swap_bytes` method.
macro_rules! swap_bytes_impl {
    ( $( $ty:ty ),* ) => {
        $(
            impl SwapBytes for $ty {
                #[inline(always)]
                fn swap_bytes(self) -> Self {
                    <$ty>::swap_bytes(self)
                }
            }
        )*
    };
}
swap_bytes_impl!(i8, i16, u16, i32, u32, i64, u64, i128, u128);
+
643
+ fn read_vec_fast_int < T , R > (
644
+ reader : & mut R ,
645
+ count : usize ,
646
+ endian : Endian ,
647
+ list : & mut Vec < T > ,
648
+ ) -> BinResult < ( ) >
649
+ where
650
+ R : Read ,
651
+ T : Clone + Default + bytemuck:: Pod + SwapBytes ,
652
+ {
653
+ let mut start = 0 ;
654
+ let mut remaining = count;
655
+ // Allocating and reading from the source in chunks is done to keep
656
+ // a bad `count` from causing huge memory allocations that are
657
+ // doomed to fail
658
+ while remaining != 0 {
659
+ // Using a similar strategy as std `default_read_to_end` to
660
+ // leverage the memory growth strategy of the underlying Vec
661
+ // implementation (in std this will be exponential) using a
662
+ // minimum byte allocation
663
+ let growth = 32 / core:: mem:: size_of :: < T > ( ) ;
664
+ list. reserve ( remaining. min ( growth. max ( 1 ) ) ) ;
665
+
666
+ let items_to_read = remaining. min ( list. capacity ( ) - start) ;
667
+ let end = start + items_to_read;
668
+
669
+ // In benchmarks, this resize decreases performance by 27–40%
670
+ // relative to using `unsafe` to write directly to uninitialised
671
+ // memory, but nobody ever got fired for buying IBM
672
+ list. resize ( end, T :: default ( ) ) ;
673
+ reader. read_exact ( bytemuck:: cast_slice_mut :: < _ , u8 > ( & mut list[ start..end] ) ) ?;
674
+
675
+ remaining -= items_to_read;
676
+ start += items_to_read;
677
+ }
678
+
679
+ if core:: mem:: size_of :: < T > ( ) != 1
680
+ && ( ( cfg ! ( target_endian = "big" ) && endian == crate :: Endian :: Little )
681
+ || ( cfg ! ( target_endian = "little" ) && endian == crate :: Endian :: Big ) )
682
+ {
683
+ for value in list. iter_mut ( ) {
684
+ * value = value. swap_bytes ( ) ;
685
+ }
686
+ }
687
+ Ok ( ( ) )
688
+ }
0 commit comments