Every line of 'float upto 2 decimal in python' code snippets is scanned for vulnerabilities by our powerful machine learning engine that combs millions of open source libraries, ensuring your Python code is secure.
def fmtFloat(n):
    # Fixed-width (6 characters) formatting with 3 decimal places.
    return '{:6.3f}'.format(n)
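This helper fixes the output at three decimal places; for the two-decimal formatting the query asks about, a minimal sketch using the same format-spec approach (the value is illustrative):

value = 3.14159

print('{:.2f}'.format(value))  # '3.14' -- a string with exactly 2 decimal places
print(f'{value:.2f}')          # same result using an f-string
print(round(value, 2))         # 3.14  -- still a float, so trailing zeros are not preserved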
def to_float(num):
    """Convert anything to float."""
    # to_num is a helper defined elsewhere in the same source.
    return float(to_num(num))
def test_decimal_int(self):
    a, b = mathfilters.handle_float_decimal_combinations(Decimal('2.0'), 1, '+')
    self.assertTrue(isinstance(a, Decimal), 'Type is {0}'.format(type(a)))
    self.assertTrue(isinstance(b, int), 'Type is {0}'.format(type(b)))
def truncate_float(float_num, n):
    # Truncate (not round) float_num to n digits after the decimal point.
    str_repr = str(float_num)
    idx = str_repr.find('.')
    if idx > 0:
        return float(str_repr[0: 1 + idx + n])
    else:
        return float_num
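A short usage sketch contrasting truncation with Python's built-in round() (values are illustrative):

print(truncate_float(3.14959, 2))  # 3.14 -- digits beyond the 2nd are simply dropped
print(round(3.14959, 2))           # 3.15 -- round() rounds instead of truncating
print(truncate_float(5, 2))        # 5    -- no '.' in str(5), so the value is returned unchanged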
def convert_to_decimal(number, frac=False):
    # Note: `long` and `unicode` are Python 2 types; under Python 3 this
    # check would use only int and str.
    bad_type = not isinstance(number, (int, long, str, unicode, Decimal, Fraction))
    if bad_type:
        message = "{}, of type {} is not suitable as a Decimal"
        logger.warning(message.format(number, type(number)))
    return Fraction(number) if frac else Decimal(number)
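When an exact two-decimal result matters (prices, for example), Decimal.quantize from the standard library gives explicit control over rounding; a minimal self-contained sketch with illustrative values:

from decimal import Decimal, ROUND_HALF_UP

price = Decimal('2.675')
# Quantize to two decimal places with an explicit rounding mode.
print(price.quantize(Decimal('0.01'), rounding=ROUND_HALF_UP))  # 2.68
# The nearest binary float to 2.675 is slightly smaller, so round() goes down.
print(round(2.675, 2))  # 2.67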
def BN_convert_float(module):
    '''
    Designed to work with network_to_half.
    BatchNorm layers need parameters in single precision.
    Find all layers and convert them back to float. This can't
    be done with the built-in .apply as that function will apply
    fn to all modules, parameters, and buffers. Thus we wouldn't
    be able to guard the float conversion based on the module type.
    '''
    if isinstance(module, torch.nn.modules.batchnorm._BatchNorm):
        module.float()
    for child in module.children():
        BN_convert_float(child)
    return module
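A minimal usage sketch, assuming PyTorch is installed and the model shown is just an illustrative torch.nn.Module being converted to half precision:

import torch

model = torch.nn.Sequential(
    torch.nn.Conv2d(3, 8, 3),
    torch.nn.BatchNorm2d(8),
    torch.nn.ReLU(),
)
model = model.half()             # convert all parameters to float16
model = BN_convert_float(model)  # restore BatchNorm parameters to float32 for numerical stability
print(next(model[0].parameters()).dtype)  # torch.float16
print(next(model[1].parameters()).dtype)  # torch.float32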
def makeFloat(sign, ds, tail):
    # Join an optional sign, the integer digits, and the fractional tail into a float.
    return float((sign or '') + ds + tail)
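A short usage sketch with illustrative arguments (in the original source these pieces would normally come from a tokenizer or parser):

print(makeFloat('-', '3', '.14'))   # -3.14
print(makeFloat(None, '2', '.50'))  # 2.5 -- a missing sign is treated as an empty string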