How to convert a binary fraction to decimal
You can split the number (as a string) at the dot and handle the integer part with one function and the fractional part with another to get the right value.
The solution works with other bases as well.
function convert(value, base = 2) {
    var [integer, fraction = ''] = value.toString().split('.');
    // (integer[0] !== '-' || -1) is true (coerced to 1) for non-negative input
    // and -1 for negative input, so the fraction gets the correct sign.
    return parseInt(integer, base) + (integer[0] !== '-' || -1) * fraction
        .split('')
        .reduceRight((r, a) => (r + parseInt(a, base)) / base, 0);
}
console.log(convert(1100)); // 12
console.log(convert(0.0011)); // 0.1875
console.log(convert(1100.0011)); // 12.1875
console.log(convert('ABC', 16)); // 2748
console.log(convert('0.DEF', 16)); // 0.870849609375
console.log(convert('ABC.DEF', 16)); // 2748.870849609375
console.log(convert('-ABC.DEF', 16)); // -2748.870849609375
console.log(convert(-1100.0011)); // -12.1875
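The reduceRight call folds the fractional digits from right to left (Horner-style): each digit is added to the running result and the sum is divided by the base. A minimal trace for the fraction part of 1100.0011 in base 2 (not part of the original snippet) looks like this:
// '1' -> (0     + 1) / 2 = 0.5
// '1' -> (0.5   + 1) / 2 = 0.75
// '0' -> (0.75  + 0) / 2 = 0.375
// '0' -> (0.375 + 0) / 2 = 0.1875
console.log('0011'.split('').reduceRight((r, a) => (r + parseInt(a, 2)) / 2, 0)); // 0.1875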
TL;DR:
const convert = (s, b) => (+((s = s.toString().trim().split("."))[0][0] !== "-") || -1) * ((parseInt(s[0].replace("-", ""), (b = +b || 2))) + (s[1].split("").reduceRight((n, d) => (n + parseInt(d, b)) / b, 0)));
Nina Scholz has a nice example, but it doesn't work with negative numbers (plus other issues).
So, here is an improved one:
/**
 * @param {string} input
 * @param {number} [base]
 * @returns {number}
 */
function convert(input, base = 2) {
    const [ integerRaw, decimalRaw = "" ] = input.toString().trim().split(".");
    const integer = parseInt(integerRaw.replace("-", ""), base);
    const decimal = decimalRaw.split("").reduceRight((sum, num) => (sum + parseInt(num, base)) / base, 0);
    return (integerRaw.startsWith("-") ? -1 : 1) * (integer + decimal);
}
convert("1100.0011"); // 12.1875
convert("-1100.0011"); // -12.1875
convert("-Z.ZZZ", 36); // -35.99997856652949
As far as I know, JavaScript doesn't provide such built-in functionality.
But I'm wondering whether there is anything standard already.
Not to my knowledge.
I think you need to create your own function:
function toDecimal(string, radix) {
    radix = radix || 2;
    var s = string.split('.');
    var decimal = parseInt(s[0], radix);
    if (s.length > 1) {
        var fract = s[1].split('');
        // Each fractional digit is worth digit / radix^(position + 1);
        // parseInt is needed so digits above 9 (e.g. 'A'..'F') work too.
        for (var i = 0, div = radix; i < fract.length; i++, div = div * radix) {
            decimal = decimal + parseInt(fract[i], radix) / div;
        }
    }
    return decimal;
}
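A quick usage check (these calls are not part of the original answer; the hexadecimal case relies on parsing the fractional digits with parseInt as shown above):
console.log(toDecimal('1100.0011'));   // 12.1875
console.log(toDecimal('ABC.DEF', 16)); // 2748.870849609375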
This question asks whether there is some JavaScript standard for parsing binary floating-point numbers, as if parseFloat() could have taken a second radix parameter to parse the binary number 0.101 like this: parseFloat('0.101', 2).
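A quick demonstration of the gap (not part of the original answer): parseFloat() simply ignores a second argument and parses the string as decimal, while parseInt() accepts a radix but drops the fraction:
console.log(parseFloat('0.101', 2));   // 0.101 (the second argument is ignored)
console.log(parseInt('1100.0011', 2)); // 12 (the radix works, but the fraction is dropped)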
While there is no such standard, there is an easy and direct way to solve this.
If we represent the binary number 0.101 as a binary fraction, it is easily converted to a decimal fraction:
0.101₂ = 101₂/1000₂ = (5/8)₁₀ = 0.625
The following one-line expression translates this to JavaScript. Here, num could be any type of binary number (represented as a string), including negative numbers:
parseInt(num.replace('.', ''), 2) / Math.pow(2, (num.split('.')[1] || '').length)
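For example, with num = '-1100.0011' (a worked check, not part of the original answer), the expression removes the dot, parses -11000011 in base 2 and divides by 2 to the power of the fraction length:
const num = '-1100.0011';
// parseInt('-11000011', 2) = -195; the fraction has 4 digits, so divide by 2 ** 4 = 16
console.log(parseInt(num.replace('.', ''), 2) / Math.pow(2, (num.split('.')[1] || '').length)); // -12.1875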
The solution is easily adapted to floating-point numbers in any base between 2 and 36, and we can wrap it in our own parseFloatRadix() function:
function parseFloatRadix(num, radix) {
    return parseInt(num.replace('.', ''), radix) /
        Math.pow(radix, (num.split('.')[1] || '').length);
}
test('0.101', 2, 0.625);
test('0.011', 2, 0.375);
test('0.0011', 2, 0.1875);
test('-011', 2, -3);
test('011', 2, 3);
test('-1100.0011', 2, -12.1875);
test('1100.0011', 2, 12.1875);
test('0.00011001100110011001100', 2, 0.09999990463256836);
test('ABC', 16, 2748);
test('-0.DEF', 16, -0.870849609375);
test('ABC.DEF', 16, 2748.870849609375);
test('-102.201', 3, -11.703703703703704);
test('-Z.ZZZ', 36, -35.99997856652949);
function test(num, radix, expected) {
    let result = parseFloatRadix(num, radix);
    console.log(num + ' (base ' + radix + ') --> ' + result +
        (result === expected ? ' (OK)' : ' (Expected ' + expected + ')'));
}