How do I reference the entire row when creating a new column in a data.table?
You can use `sweep()` with `rowSums()`, i.e.
# Subtract `reference` from every row of `dt`; any nonzero cell marks a
# mismatch, and rowSums counts the mismatches per row.
rowSums(sweep(dt, 2, reference) != 0)
#[1] 2 2 2 2 4 4 3 2 4 3 2 1 3 4 1 3
BENCHMARK
# Per-row count of entries differing from the reference vector, computed by
# melting to long form and summing mismatches per original row index.
# NOTE(review): reads `reference` from the enclosing environment, and mutates
# `dt` by reference (adds an `I` column) — side effect visible to the caller.
HUGH <- function(dt) {
# .I tags each row; after melt, rows sharing an I value came from that row.
dt[, I := .I]
distance_by_I <- melt(dt, id.vars = "I")[, .(distance = sum(reference != value)), keyby = "I"]
# Join the distances back onto dt so the result carries a `distance` column.
return(dt[distance_by_I, on = "I"])
}
# Count, for every row of `dt`, how many entries differ from the global
# `reference` vector (one reference value per column).
Sotos <- function(dt) {
  # sweep() subtracts `reference` column-wise; nonzero cells are mismatches.
  deltas <- sweep(dt, 2, reference)
  rowSums(deltas != 0)
}
# 100k-row, 5-column data.table of random 0/1 (double) values.
dt1 <- as.data.table(replicate(5, sample(c(0, 1), 100000, replace = TRUE)))
# NOTE(review): both functions read the global `reference`; define it first.
microbenchmark(HUGH(dt1), Sotos(dt1))
#Unit: milliseconds
# expr min lq mean median uq max neval cld
# HUGH(dt1) 112.71936 117.03380 124.05758 121.6537 128.09904 155.68470 100 b
# Sotos(dt1) 23.66799 31.11618 33.84753 32.8598 34.02818 68.75044 100 a
Another:
# data.table idiom: compare each column of .SD to its corresponding
# reference value, then add the resulting TRUE/FALSE columns up.
ref = as.list(reference)
dt[, Reduce(`+`, Map(`!=`, .SD, ref))]
How it works: we take each vector column in `.SD` and compare it to the single corresponding value in `ref`. The `!=` operator is vectorized, so each element of `ref` is recycled to match the length of its column.
This `Map` call returns a list of TRUE/FALSE vectors, one for each column. When we add up TRUE/FALSE values, they are treated as 1/0, so we just need to add these columns up. This can be achieved by passing the pairwise operator `+` between the first column and the second; and then again between the result of that computation and the third column; and so on. This is how `Reduce` works. It might be more readable as
# Equivalent two-step form: build the list of per-column inequality vectors
# first, then fold them together with `+`, starting from integer zero.
x = dt[, Map(`!=`, .SD, ref)]
Reduce(`+`, x, init = 0L)
which can be read as
- v = 0
- for each xi in x, update v = v + xi
See also `?Map` and `?Reduce`.
Timings. I'm modifying the benchmark data, since using integers seems a lot saner if the OP really has 0-1 data. Also, adding more columns since the OP says they have a lot. Finally, editing Hugh's answer to be comparable to the others:
# Reworked HUGH: takes the reference vector `r` explicitly, returns a bare
# integer vector of distances, and cleans up its temporary column afterwards.
HUGH <- function(dt, r) {
# Tag each row with its index so melt() can group values back by row.
dt[, I := .I]
# Within each keyby = "I" group, melt() stacks values in column order, so
# `r` lines up element-for-element with the row's original columns.
res <- melt(dt, id.vars = "I")[, .(distance = sum(r != value)), keyby = "I"]$distance
# Remove the helper column so dt is left as the caller supplied it.
dt[, I := NULL]
res
}
# Row-wise count of entries in `dt` that differ from the vector `r`
# (one reference value per column). Returns a numeric vector.
Sotos <- function(dt, r) {
  # Subtracting r from every row leaves zeros exactly where entries match.
  diffs <- sweep(dt, 2, r)
  rowSums(diffs != 0)
}
# Row-wise mismatch count via transposition: after t(dt), each original row
# becomes a column, and `r` recycles naturally down those columns.
mm <- function(dt, r){
  flipped <- t(dt)
  colSums(r != flipped)
}
# Sum of per-column inequality flags: Map() builds one TRUE/FALSE vector per
# column of .SD, and Reduce(`+`) adds them up (TRUE/FALSE count as 1/0).
ff <- function(DT, r){
  neq <- DT[, Map(`!=`, .SD, r)]
  Reduce(`+`, neq)
}
# Benchmark setup: 20k rows x 500 integer 0/1 columns.
nr = 20000
nc = 500
dt1 <- as.data.table(replicate(nc, sample(0:1, nr, replace = TRUE)))
# NOTE(review): `reference` comes from the OP's question and must already
# exist here; it is recycled out to one integer value per column.
ref <- rep(as.integer(reference), length.out=nc)
lref = as.list(ref)
# Sanity checks: the integer-returning methods agree exactly, the
# numeric-returning ones agree exactly, and the two families match in value.
identical(HUGH(dt1, ref), ff(dt1, lref)) # integer output
identical(mm(dt1, ref), Sotos(dt1, ref)) # numeric output
all.equal(HUGH(dt1, ref), mm(dt1, ref)) # but they match
# all TRUE
microbenchmark::microbenchmark(times = 3,
HUGH(dt1, ref),
Sotos(dt1, ref),
mm(dt1, ref),
ff(dt1, lref)
)
Result:
Unit: milliseconds
expr min lq mean median uq max neval
HUGH(dt1, ref) 365.0529 370.05233 378.8826 375.0517 385.79737 396.5430 3
Sotos(dt1, ref) 871.5693 926.50462 961.5527 981.4400 1006.54437 1031.6488 3
mm(dt1, ref) 104.5631 121.74086 131.7157 138.9186 145.29197 151.6653 3
ff(dt1, lref) 87.0800 87.48975 93.1361 87.8995 96.16415 104.4288 3
Here's another way:
# Transpose-and-compare: rows of `dt` become columns of t(dt), so the global
# `reference` vector recycles down each column; colSums counts mismatches.
mm <- function(dt){
  flipped <- t(dt)
  colSums(reference != flipped)
}
# Distance of each row of `dt` from the global `reference` vector.
mm(dt)
# [1] 2 2 2 2 4 4 3 2 4 3 2 1 3 4 1 3
Benchmark:
library(data.table)
# Same data as the earlier benchmark: 100k rows, 5 double 0/1 columns.
dt1 <- as.data.table(replicate(5, sample(c(0, 1), 100000, replace = TRUE)))
# Both return numeric vectors, so identical() is a fair comparison here.
identical(Sotos(dt1), mm(dt1))
# [1] TRUE
microbenchmark::microbenchmark(HUGH(dt1), Sotos(dt1), mm(dt1))
# Unit: milliseconds
# expr min lq mean median uq max neval cld
# HUGH(dt1) 85.542550 101.339416 129.71317 106.634169 112.66004 473.9380 100 b
# Sotos(dt1) 35.699128 42.677696 125.95430 180.302919 189.34098 377.9523 100 b
# mm(dt1) 4.604986 7.002416 17.57238 9.819895 12.27015 165.1440 100 a