@@ -133,6 +133,30 @@ def _(layer: Reshape):
 @_request_kif.register
 def _(layer: Activation):
     fn_name = layer.attributes.get('activation')
+
+    if layer.attributes.get('trusted', False):
+        result_t = layer.get_output_variable().type.precision
+        if fn_name in ('linear', 'relu'):
+            output_shape = get_output_shape(layer)
+            k, w, f = result_t.signed, result_t.width, result_t.fractional
+            i = w - k - f
+            k = np.full(output_shape, k, dtype=np.int16)
+            i = np.full(output_shape, i, dtype=np.int16)
+            f = np.full(output_shape, f, dtype=np.int16)
+            if result_t.rounding_mode == RoundingMode.RND:
+                f += 1
+            elif result_t.rounding_mode != RoundingMode.TRN:
+                f = np.full(output_shape, 126, dtype=np.int16)
+            if result_t.saturation_mode != SaturationMode.WRAP:
+                k = np.ones(output_shape, dtype=np.int16)
+                i = np.full(output_shape, 126, dtype=np.int16)
+            if fn_name == 'linear':
+                return ((k, i, f),)
+            else:
+                k = np.ones(output_shape, dtype=np.int16)
+                i = np.full(output_shape, 126, dtype=np.int16)
+                return ((k, i, f),)
+
     if fn_name == 'linear':
         return (requested_kif(layer),)
     if fn_name == 'relu':
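
Note (not part of the patch): a minimal standalone sketch of the request computed above for a trusted 'linear' or 'relu' activation. The helper name trusted_request_kif, its parameters, and the example values are illustrative only; it covers just the RND-vs-TRN rounding and WRAP-vs-saturating cases handled by this hunk, with 126 acting as the same "effectively unbounded" sentinel.

    import numpy as np

    def trusted_request_kif(signed, width, fractional, rnd=False, sat=False, shape=(4,)):
        k = int(signed)
        i = width - k - fractional               # integer bits excluding the sign bit
        k = np.full(shape, k, dtype=np.int16)
        i = np.full(shape, i, dtype=np.int16)
        f = np.full(shape, fractional, dtype=np.int16)
        if rnd:                                   # RND needs one extra fractional bit upstream
            f = f + 1
        if sat:                                   # saturating output absorbs any integer range
            k = np.ones(shape, dtype=np.int16)
            i = np.full(shape, 126, dtype=np.int16)
        return k, i, f

    print(trusted_request_kif(True, 16, 8, rnd=True))   # requests k=1, i=7, f=9 per element
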
@@ -533,6 +557,16 @@ def _(layer: Concatenate):
 @_produce_kif.register
 def _(layer: Activation):
     fn_name = layer.attributes['activation'].lower()
+    if layer.attributes.get('trusted', False):
+        output_shape = get_output_shape(layer)
+        result_t = layer.get_output_variable().type.precision
+        k, w, f = result_t.signed, result_t.width, result_t.fractional
+        i = w - k - f
+        k = np.full(output_shape, k, dtype=np.int16)
+        i = np.full(output_shape, i, dtype=np.int16)
+        f = np.full(output_shape, f, dtype=np.int16)
+        return k, i, f
+
     k, i, f = get_input_kifs(layer)[0]
 
     match fn_name:
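
Note (not part of the patch): for a trusted layer the produced (k, i, f) is read directly from the declared output precision instead of being derived from the input KIFs. A small sketch with illustrative values:

    import numpy as np

    signed, width, fractional = True, 10, 4   # e.g. a signed 10-bit type with 4 fractional bits
    output_shape = (2, 3)

    k = np.full(output_shape, int(signed), dtype=np.int16)
    i = np.full(output_shape, width - int(signed) - fractional, dtype=np.int16)
    f = np.full(output_shape, fractional, dtype=np.int16)
    print(k[0, 0], i[0, 0], f[0, 0])   # -> 1 5 4
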
@@ -605,6 +639,10 @@ def requested_by_non_saturating_quantizer(layer: Layer) -> bool:
 
 
 def default_register_precision(layer: Layer):
+    if layer.attributes.get('trusted', False):
+        # Trusted layers have their precision already set
+        return
+
     _pk, _pi, _pf = produce_kif(layer)  # Maximum possible k,i,f output from this layer
     _rk, _ri, _rf = requested_kif(layer)  # Maximum possible k,i,f may be utilized by the next layer
     _oi, _of = np.minimum(_pi, _ri), np.minimum(_pf, _rf)
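
Note (not part of the patch): the step a trusted layer now skips is the elementwise combination of producible and requested bits. A small sketch with made-up arrays:

    import numpy as np

    _pi, _pf = np.array([7, 7]), np.array([8, 8])   # integer/fractional bits the layer can produce
    _ri, _rf = np.array([5, 9]), np.array([6, 6])   # bits requested by the consumer
    _oi, _of = np.minimum(_pi, _ri), np.minimum(_pf, _rf)
    print(_oi, _of)   # [5 7] [6 6]
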
@@ -793,7 +831,11 @@ def has_fixed_quantizer(self, model: 'ModelGraph'):
         return True
 
     def _match(self, model: 'ModelGraph'):
-        return self.has_fixed_quantizer(model)
+        enabled = model.config.config['HLSConfig']['Model'].get('BitExact', None)
+        if enabled is None:
+            # Enable by default if any FixedPointQuantizer is present
+            enabled = self.has_fixed_quantizer(model)
+        return enabled
 
     def transform(self, model: 'ModelGraph'):
         if not self._match(model):
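
Note (not part of the patch): a usage sketch of the new BitExact switch. The nested dict below stands in for model.config.config; when the key is absent the pass falls back to detecting FixedPointQuantizer layers, as before.

    config = {'HLSConfig': {'Model': {}}}               # stand-in for model.config.config
    config['HLSConfig']['Model']['BitExact'] = True     # force the pass on (False disables it)
    enabled = config['HLSConfig']['Model'].get('BitExact', None)
    print(enabled)   # True; if the key were absent, enabled would be None -> auto-detect
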