R: calculating a rolling average with a window based on value (not number of rows or a date/time variable)
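
Both answers below assume a data frame df with a numeric ms column (millisecond timestamps, with gaps) and a 0/1 correct column, as described in the question. The OP's data are not reproduced here, but a hypothetical stand-in for trying out the code could be generated like this (the outputs shown below come from the OP's data and will differ for this simulated example):

set.seed(1)
df <- data.frame(ms      = sort(sample(300:346, 60, replace = TRUE)), # timestamps with gaps
                 correct = rbinom(60, 1, 0.4))                        # 0/1 response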

Try this:

library(dplyr)

# count the number of values per ms
df <- df %>%
        group_by(ms) %>%
        mutate(Nb.values = n())

# count correct responses per ms value, keeping every ms between min and max as a factor level
df2 <- setNames(aggregate(correct ~ factor(df$ms, levels = as.character(seq(min(df$ms), max(df$ms), 1))),
                          df, sum),
                c("ms", "Count.correct"))

# complete data frame (including unused levels)
df2 <- tidyr::complete(df2, ms)
df2$ms <- as.numeric(levels(df2$ms))[df2$ms]
df2 <- df2 %>% left_join(distinct(df[, c(1, 3)]), "ms") # add Nb.values back (columns ms and Nb.values of df)

# compute the rolling share of correct responses over 5-ms windows
df2 %>%
        mutate(Window = paste(ms, ms + 4, sep = "-"), # label each window
               Rolling.correct = zoo::rollapply(Count.correct, 5, sum, na.rm = TRUE,
                                                partial = TRUE, fill = NA, align = "left") /
                       zoo::rollapply(Nb.values, 5, sum, na.rm = TRUE, partial = TRUE,
                                      fill = NA, align = "left")) # rolling sum of correct / rolling sum of observations

# A tibble: 43 x 5
      ms Count.correct Nb.values  Window Rolling.correct
   <dbl>         <dbl>     <int>   <chr>           <dbl>
 1   300             2         3 300-304            0.40
 2   301             0         1 301-305            0.00
 3   302            NA        NA 302-306            0.25
 4   303             0         1 303-307            0.25
 5   304            NA        NA 304-308            0.25
 6   305             0         2 305-309            0.25
 7   306             1         1 306-310            0.25
 8   307            NA        NA 307-311            0.00
 9   308             0         1 308-312            0.20
10   309            NA        NA 309-313            0.25
# ... with 33 more rows
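
As a quick sanity check of the first window: over 300-304 the table shows 2 correct responses out of 3 + 1 + 1 = 5 observations (302 and 304 have no data), so Rolling.correct = 2 / 5 = 0.40, in line with the first row of the data.table result below.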

For the sake of completeness, here is an answer which uses data.table to aggregate in a non-equi join.

The OP has clarified in the comments that they are looking for a sliding window of 5 ms, i.e., windows covering 300-304, 301-305, 302-306, etc.

As there is no data point at 302 ms in the OP's data set, the missing values need to be filled in.

library(data.table)
ws <- 5   # define window size
setDT(df)[SJ(start = seq(min(ms), max(ms), 1))[, end := start + ws - 1], 
          on = .(ms >= start, ms <= end),
          .(share_correct = mean(correct)), by = .EACHI]
     ms  ms share_correct
 1: 300 304     0.4000000
 2: 301 305     0.0000000
 3: 302 306     0.2500000
 4: 303 307     0.2500000
 5: 304 308     0.2500000
 6: 305 309     0.2500000
 7: 306 310     0.2500000
 8: 307 311     0.0000000
 9: 308 312     0.2000000
10: 309 313     0.2500000
11: 310 314     0.2000000
12: 311 315     0.4000000
13: 312 316     0.4285714
14: 313 317     0.2857143
15: 314 318     0.3750000
16: 315 319     0.4285714
17: 316 320     0.4285714
18: 317 321     0.4000000
19: 318 322     0.4000000
20: 319 323     0.2500000
21: 320 324     0.4000000
22: 321 325     0.3333333
23: 322 326     0.5000000
24: 323 327     1.0000000
25: 324 328     1.0000000
26: 325 329     0.5000000
27: 326 330     0.2000000
28: 327 331     0.2000000
29: 328 332     0.4285714
30: 329 333     0.3333333
31: 330 334     0.2857143
32: 331 335     0.5000000
33: 332 336     0.3750000
34: 333 337     0.2857143
35: 334 338     0.3000000
36: 335 339     0.3750000
37: 336 340     0.3750000
38: 337 341     0.4285714
39: 338 342     0.4000000
40: 339 343     0.4285714
41: 340 344     0.4285714
42: 341 345     0.4000000
43: 342 346     0.5000000
     ms  ms share_correct

If the OP is interested only in windows whose starting point exists in the dataset, the code can be simplified:

setDT(df)[SJ(start = unique(ms))[, end := start + ws - 1], 
          on = .(ms >= start, ms <= end),
          .(share_correct = mean(correct)), by = .EACHI]
     ms  ms share_correct
 1: 300 304     0.4000000
 2: 301 305     0.0000000
 3: 303 307     0.2500000
 4: 305 309     0.2500000
 5: 306 310     0.2500000
 6: 308 312     0.2000000
 7: 310 314     0.2000000
 8: 311 315     0.4000000
 9: 312 316     0.4285714
10: 314 318     0.3750000
11: 315 319     0.4285714
12: 316 320     0.4285714
13: 317 321     0.4000000
14: 318 322     0.4000000
15: 320 324     0.4000000
16: 321 325     0.3333333
17: 322 326     0.5000000
18: 324 328     1.0000000
19: 328 332     0.4285714
20: 329 333     0.3333333
21: 330 334     0.2857143
22: 332 336     0.3750000
23: 334 338     0.3000000
24: 335 339     0.3750000
25: 336 340     0.3750000
26: 337 341     0.4285714
27: 338 342     0.4000000
28: 340 344     0.4285714
29: 341 345     0.4000000
30: 342 346     0.5000000
     ms  ms share_correct

In both cases, a data.table containing the intervals [start, end] is created on the fly and right-joined to df. During the non-equi join, the intermediate result is immediately grouped by the join parameters (by = .EACHI) and aggregated. Note that closed intervals are used to be in line with the OP's expectations.
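
For readers less familiar with this idiom, the same aggregation can be written with the lookup table built explicitly instead of on the fly (a sketch under the same assumptions; windows is just a name chosen here):

library(data.table)
ws <- 5                                                       # window size in ms
windows <- data.table(start = seq(min(df$ms), max(df$ms), 1)) # one row per candidate window start
windows[, end := start + ws - 1]                              # closed interval [start, end]
setDT(df)[windows,
          on = .(ms >= start, ms <= end),                     # non-equi join condition
          .(share_correct = mean(correct)),                   # aggregate within each interval
          by = .EACHI]                                        # one group per row of windows

Splitting the steps this way makes it easier to inspect the interval table before joining, at the cost of one extra intermediate object.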