	Specify width for st.floats in hypothesis_utils.tensor (#25188)
Summary: Pull Request resolved: https://github.com/pytorch/pytorch/pull/25188

CircleCI complains that some of the generated numbers are not representable as float32, which pollutes the logs: https://circleci.com/gh/pytorch/pytorch/2554740?utm_campaign=vcs-integration-link&utm_medium=referral&utm_source=github-build-link

Test Plan: CircleCI

Imported from OSS

Differential Revision: D17063240

fbshipit-source-id: 0572fb810d8ccd8cdf3f3ac7efdf0cfce5aee6ca
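For context, here is a minimal sketch (not part of this commit) of what width=32 buys: with it, Hypothesis only draws values that are exactly representable as float32, so filling an np.float32 array no longer triggers the representability warnings mentioned above. The bounds, array shape, and test name below are illustrative only.

    import numpy as np
    from hypothesis import given, strategies as st
    from hypothesis.extra import numpy as stnp

    # Restrict generation to values exactly representable in float32.
    float32_elements = st.floats(-1e6, 1e6, allow_nan=False, width=32)

    @given(stnp.arrays(dtype=np.float32, elements=float32_elements, shape=(3, 4)))
    def test_float32_roundtrip(x):
        # Every drawn element fits losslessly into the float32 array,
        # so no "not representable as float32" warnings are emitted.
        assert x.dtype == np.float32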
Committed by: Facebook Github Bot
Parent: 44a7879b6e
Commit: c351a68f5b
@@ -42,7 +42,7 @@ def _get_valid_min_max(qparams):
     # make sure intermediate results are within the range of long
     min_value = max((long_min - zero_point) * scale, (long_min / scale + zero_point))
     max_value = min((long_max - zero_point) * scale, (long_max / scale + zero_point))
-    return min_value, max_value
+    return np.float32(min_value), np.float32(max_value)
 
 """Hypothesis filter to avoid overflows with quantized tensors.
 
@@ -106,7 +106,7 @@ def qparams(draw, dtypes=None, scale_min=None, scale_max=None,
         scale_min = torch.finfo(torch.float).eps
     if scale_max is None:
         scale_max = torch.finfo(torch.float).max
-    scale = draw(st.floats(min_value=scale_min, max_value=scale_max))
+    scale = draw(st.floats(min_value=scale_min, max_value=scale_max, width=32))
 
     return scale, zero_point, quantized_type
 
@@ -162,7 +162,7 @@ def tensor(draw, shapes=None, elements=None, qparams=None):
         _shape = draw(st.sampled_from(shapes))
     if qparams is None:
         if elements is None:
-            elements = st.floats(-1e6, 1e6, allow_nan=False)
+            elements = st.floats(-1e6, 1e6, allow_nan=False, width=32)
         X = draw(stnp.arrays(dtype=np.float32, elements=elements, shape=_shape))
         assume(not (np.isnan(X).any() or np.isinf(X).any()))
         return X, None
@@ -170,7 +170,7 @@ def tensor(draw, shapes=None, elements=None, qparams=None):
     if elements is None:
         min_value, max_value = _get_valid_min_max(qparams)
         elements = st.floats(min_value, max_value, allow_infinity=False,
-                             allow_nan=False)
+                             allow_nan=False, width=32)
     X = draw(stnp.arrays(dtype=np.float32, elements=elements, shape=_shape))
     # Recompute the scale and zero_points according to the X statistics.
     scale, zp = _calculate_dynamic_qparams(X, qparams[2])
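The np.float32 cast in _get_valid_min_max pairs with the width=32 change: once the element strategy is 32-bit, the bounds handed to st.floats should themselves be float32-representable, otherwise Hypothesis complains about them. A hedged sketch of that interaction follows; the scale, zero_point, and long_min/long_max values are made up for illustration and are not taken from the diff.

    import numpy as np
    from hypothesis import strategies as st

    scale, zero_point = 0.1, 2
    long_min, long_max = np.iinfo(np.int64).min, np.iinfo(np.int64).max

    # Same bound computation as _get_valid_min_max, carried out in float64...
    min_value = max((long_min - zero_point) * scale, (long_min / scale + zero_point))
    max_value = min((long_max - zero_point) * scale, (long_max / scale + zero_point))

    # ...then cast to float32 so the bounds match the width=32 element strategy.
    elements = st.floats(np.float32(min_value), np.float32(max_value),
                         allow_infinity=False, allow_nan=False, width=32)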