Loading and preparing data for analysis
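The chunks in this section assume the required packages are already loaded (typically in a setup chunk that is not shown here). For reference, this is the set implied by the functions used below:
library(dplyr)     # %>%, arrange, filter, group_by, summarize
library(reshape2)  # melt
library(knitr)     # kable
library(pander)    # pander
library(ARTool)    # art, artlm
library(lsmeans)   # lsmeans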
data = read.table("XP4ComparativeStudy_logs.csv", header=TRUE, sep=",")
data = data %>% arrange(participantId, tech, block, targetSize, density)
# filter out the first trial of each set, because we do not know where in the scene the cursor comes from
data = data %>% filter(trialId > 1)
Outlier statistics
Filtering trials whose time (finalTimer) is above mean + 3 * std_dev, computed separately for each technique:
outliersStats = data %>%
filter(success == "True") %>% # ignore error trials when computing the outlier threshold
group_by(tech) %>% # the outlier threshold is computed for each technique
summarize(outlierThreshold = mean(finalTimer) + 3 * sd(finalTimer))
# contains only the outlier records
dataOutliers = data %>% left_join(outliersStats, by = c("tech")) %>%
filter(success == "True" & finalTimer > outlierThreshold)
# number of outlier trials for each technique
kable(dataOutliers %>% count(tech))
| tech         |  n |
|:-------------|---:|
| 1_RayCasting | 16 |
| 2_RayCursor  | 13 |
| 3_SemiAuto   | 24 |
| 4_RoEtAl     | 22 |
# filter the data to remove outliers (error trials, success == "False", are not filtered out)
data = data %>% left_join(outliersStats, by = c("tech")) %>%
filter(success == "False" | finalTimer <= outlierThreshold)
# aggregating by set of trials, and computing error rate:
dataErrorMeans = data %>%
group_by(participantId, tech, block, targetSize, density) %>%
summarize(errorRate = computeErrorRate(success))
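computeErrorRate() is a helper defined elsewhere in the analysis scripts; a minimal sketch of what it presumably computes, assuming success is a vector of "True"/"False" strings and the error rate is expressed as a percentage:
# Hypothetical sketch of the helper (not shown in this excerpt):
# percentage of failed trials within a set of trials.
computeErrorRate <- function(success) {
  100 * sum(success == "False") / length(success)
}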
# same aggregation as above, but we compute the mean time:
dataSuccessAggr = data %>%
filter(success == "True") %>%
group_by(participantId, tech, block, targetSize, density) %>%
summarize(meanFinalTimer = mean(finalTimer))
# aggregated data, with mean time and error rate in the same data table:
dataAggr = left_join(dataSuccessAggr, dataErrorMeans, by = c("participantId", "tech", "block", "targetSize", "density"))
# useful if we exclude block 1 in some analysis:
dataAggrBlock23 = dataAggr %>% filter(block > 1)
techniques = unique(data$tech)
blocks = unique(data$block)
targetSizes = unique(data$targetSize)
densities = unique(data$density)
Selection time
Effect of blocks on selection time
Check the normality of the data
data.long = melt(dataAggr, id = c("meanFinalTimer", "participantId", "tech", "block", "targetSize", "density"))
data.long$tech = factor(data.long$tech)
data.long$block = factor(data.long$block)
data.long$targetSize = factor(data.long$targetSize)
data.long$density = factor(data.long$density)
m <- aov(meanFinalTimer ~ tech*block, data=data.long)
pander(normalCheck(m))
Shapiro-Wilk normality test

data: res
W = 0.90592, p-value < 2.2e-16

Shapiro-Wilk normality test: res

| Test statistic | P value           |
|:--------------:|:-----------------:|
| 0.9059         | 2.238e-18 * * *   |
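normalCheck() is another helper from the analysis setup; a plausible sketch, assuming it simply runs a Shapiro-Wilk test on the residuals of the fitted model (the output above refers to an object named res):
# Plausible sketch of the helper (assumption): Shapiro-Wilk test on the model residuals.
normalCheck <- function(model) {
  res <- residuals(model)
  shapiro.test(res)
}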
Selection time per block
kable(data.long %>% group_by(block) %>% summarise(mean = mean(meanFinalTimer)))
| block | mean     |
|:------|---------:|
| 1     | 2.965367 |
| 2     | 2.723390 |
| 3     | 2.592280 |
Post-hoc analysis with Bonferroni correction
attach(datatr)
pw <- pairwise.t.test(meanFinalTimer, interaction(block), p.adj = "bonferroni")
detach(datatr)
kable(pw$p.value)
|   | 1         | 2  |
|:--|----------:|---:|
| 2 | 0.1605040 | NA |
| 3 | 0.0168319 | 1  |
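The post-hoc chunks in this section operate on a data frame named datatr, which is not defined in the excerpt above; a minimal sketch, under the assumption that it is simply the aggregated selection-time data in long format:
# Assumption: datatr holds one meanFinalTimer value per
# participant x tech x block x targetSize x density combination.
datatr <- data.long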
Analysis with all blocks
Selection time per technique

| tech         | mean     |
|:-------------|---------:|
| 1_RayCasting | 1.770374 |
| 2_RayCursor  | 2.721099 |
| 3_SemiAuto   | 1.876999 |
| 4_RoEtAl     | 4.672911 |
Post-hoc analysis with Bonferroni correction
attach(datatr)
pw <- pairwise.t.test(meanFinalTimer, interaction(tech), p.adj = "bonferroni")
detach(datatr)
kable(pw$p.value)
|             | 1_RayCasting | 2_RayCursor | 3_SemiAuto |
|:------------|-------------:|------------:|-----------:|
| 2_RayCursor | 0            | NA          | NA         |
| 3_SemiAuto  | 1            | 0           | NA         |
| 4_RoEtAl    | 0            | 0           | 0          |
Selection time per technique * size

| tech         | targetSize | mean     |
|:-------------|:-----------|---------:|
| 1_RayCasting | Large      | 1.663111 |
| 2_RayCursor  | Large      | 2.707410 |
| 3_SemiAuto   | Large      | 1.911865 |
| 4_RoEtAl     | Large      | 4.270223 |
| 1_RayCasting | Small      | 1.877638 |
| 2_RayCursor  | Small      | 2.734788 |
| 3_SemiAuto   | Small      | 1.842134 |
| 4_RoEtAl     | Small      | 5.075599 |
Post-hoc analysis with Bonferroni correction
attach(datatr)
pw <- pairwise.t.test(meanFinalTimer, interaction(tech,targetSize), p.adj = "bonferroni")
detach(datatr)
kable(pw$p.value)
|                    | 1_RayCasting.Large | 2_RayCursor.Large | 3_SemiAuto.Large | 4_RoEtAl.Large | 1_RayCasting.Small | 2_RayCursor.Small | 3_SemiAuto.Small |
|:-------------------|-------------------:|------------------:|-----------------:|---------------:|-------------------:|------------------:|-----------------:|
| 2_RayCursor.Large  | 0.0000000          | NA                | NA               | NA             | NA                 | NA                | NA               |
| 3_SemiAuto.Large   | 0.3864316          | 0                 | NA               | NA             | NA                 | NA                | NA               |
| 4_RoEtAl.Large     | 0.0000000          | 0                 | 0                | NA             | NA                 | NA                | NA               |
| 1_RayCasting.Small | 0.0798407          | 0                 | 1                | 0.0000000      | NA                 | NA                | NA               |
| 2_RayCursor.Small  | 0.0000000          | 1                 | 0                | 0.0000000      | 0                  | NA                | NA               |
| 3_SemiAuto.Small   | 1.0000000          | 0                 | 1                | 0.0000000      | 1                  | 0                 | NA               |
| 4_RoEtAl.Small     | 0.0000000          | 0                 | 0                | 0.0579343      | 0                  | 0                 | 0                |
Selection time per technique * density

| tech         | density | mean     |
|:-------------|:--------|---------:|
| 1_RayCasting | High    | 1.828241 |
| 2_RayCursor  | High    | 2.824155 |
| 3_SemiAuto   | High    | 1.894124 |
| 4_RoEtAl     | High    | 4.535939 |
| 1_RayCasting | Low     | 1.712508 |
| 2_RayCursor  | Low     | 2.618043 |
| 3_SemiAuto   | Low     | 1.859875 |
| 4_RoEtAl     | Low     | 4.809883 |
Post-hoc analysis with Bonferroni correction
attach(datatr)
pw <- pairwise.t.test(meanFinalTimer, interaction(tech,density), p.adj = "bonferroni")
detach(datatr)
kable(pw$p.value)
|                  | 1_RayCasting.High | 2_RayCursor.High | 3_SemiAuto.High | 4_RoEtAl.High | 1_RayCasting.Low | 2_RayCursor.Low | 3_SemiAuto.Low |
|:-----------------|------------------:|-----------------:|----------------:|--------------:|-----------------:|----------------:|---------------:|
| 2_RayCursor.High | 0                 | NA               | NA              | NA            | NA               | NA              | NA             |
| 3_SemiAuto.High  | 1                 | 0                | NA              | NA            | NA               | NA              | NA             |
| 4_RoEtAl.High    | 0                 | 0                | 0               | NA            | NA               | NA              | NA             |
| 1_RayCasting.Low | 1                 | 0                | 1               | 0             | NA               | NA              | NA             |
| 2_RayCursor.Low  | 0                 | 1                | 0               | 0             | 0                | NA              | NA             |
| 3_SemiAuto.Low   | 1                 | 0                | 1               | 0             | 1                | 0               | NA             |
| 4_RoEtAl.Low     | 0                 | 0                | 0               | 1             | 0                | 0               | 0              |
Error rate
Check the normality of the data
data.long = melt(dataAggr, id = c("errorRate", "participantId", "tech", "block", "targetSize", "density"))
data.long$tech = factor(data.long$tech)
data.long$block = factor(data.long$block)
data.long$targetSize = factor(data.long$targetSize)
data.long$density = factor(data.long$density)
m <- aov(errorRate ~ tech*block, data=data.long)
pander(normalCheck(m))
Shapiro-Wilk normality test

data: res
W = 0.84744, p-value < 2.2e-16

Shapiro-Wilk normality test: res

| Test statistic | P value           |
|:--------------:|:-----------------:|
| 0.8474         | 3.963e-23 * * *   |
Error rate does not follow a normal distribution.
Effect of blocks on error rate
Running an ANOVA on the Aligned Rank Transform (ART) of the data
m = art(errorRate ~ block + (1|participantId), data=data.long)
kable(anova(m))
|       | Term  | F        | Df | Df.res | Pr(>F)    |
|:------|:------|---------:|---:|-------:|----------:|
| block | block | 2.886354 | 2  | 562    | 0.0566066 |
Analysis with all blocks
Running ANOVA on ART
m = art(errorRate ~ tech*targetSize*density + (1|participantId), data=data.long)
kable(anova(m))
|                         | Term                    | F          | Df | Df.res | Pr(>F)    |
|:------------------------|:------------------------|-----------:|---:|-------:|----------:|
| tech                    | tech                    | 25.5157414 | 3  | 549    | 0.0000000 |
| targetSize              | targetSize              | 11.9173815 | 1  | 549    | 0.0005990 |
| density                 | density                 | 0.0132082  | 1  | 549    | 0.9085451 |
| tech:targetSize         | tech:targetSize         | 2.6162936  | 3  | 549    | 0.0503224 |
| tech:density            | tech:density            | 2.7323168  | 3  | 549    | 0.0431323 |
| targetSize:density      | targetSize:density      | 3.6180970  | 1  | 549    | 0.0576773 |
| tech:targetSize:density | tech:targetSize:density | 0.3485347  | 3  | 549    | 0.7902296 |
Error rate per technique

| tech         | mean      |
|:-------------|----------:|
| 1_RayCasting | 5.737434  |
| 2_RayCursor  | 3.405258  |
| 3_SemiAuto   | 2.434689  |
| 4_RoEtAl     | 11.887401 |
Post-hoc analysis with Bonferroni correction
pw = lsmeans(artlm(m, "tech"), pairwise ~ tech)
## Loading required namespace: lmerTest
## NOTE: Results may be misleading due to involvement in interactions
kable(summary(pw$contrasts))
## Warning in ptukey(sqrt(2) * abst, fam.size, zapsmall(df), lower.tail =
## FALSE): NaNs produced
| contrast                   | estimate   | SE       | df         | t.ratio   | p.value   |
|:---------------------------|-----------:|---------:|-----------:|----------:|----------:|
| 1_RayCasting - 2_RayCursor | 60.67361   | 16.75024 | 34.3125000 | 3.622252  | 0.0049363 |
| 1_RayCasting - 3_SemiAuto  | 37.14583   | 16.75024 | 34.3125000 | 2.217629  | 0.1387060 |
| 1_RayCasting - 4_RoEtAl    | -76.12500  | 16.75024 | 0.2835744  | -4.544710 | NaN       |
| 2_RayCursor - 3_SemiAuto   | -23.52778  | 16.75024 | 34.3125000 | -1.404623 | 0.5051509 |
| 2_RayCursor - 4_RoEtAl     | -136.79861 | 16.75024 | 0.2835744  | -8.166962 | NaN       |
| 3_SemiAuto - 4_RoEtAl      | -113.27083 | 16.75024 | 0.2835744  | -6.762339 | NaN       |
attach(data.long)
pw <- pairwise.t.test(errorRate, interaction(tech), p.adj = "bonferroni")
detach(data.long)
kable(pw$p.value)
|             | 1_RayCasting | 2_RayCursor | 3_SemiAuto |
|:------------|-------------:|------------:|-----------:|
| 2_RayCursor | 0.1804213    | NA          | NA         |
| 3_SemiAuto  | 0.0130348    | 1           | NA         |
| 4_RoEtAl    | 0.0000001    | 0           | 0          |
Error rate per technique * size

| tech         | targetSize | mean      |
|:-------------|:-----------|----------:|
| 1_RayCasting | Large      | 5.464616  |
| 2_RayCursor  | Large      | 3.462302  |
| 3_SemiAuto   | Large      | 2.041997  |
| 4_RoEtAl     | Large      | 9.995040  |
| 1_RayCasting | Small      | 6.010251  |
| 2_RayCursor  | Small      | 3.348214  |
| 3_SemiAuto   | Small      | 2.827381  |
| 4_RoEtAl     | Small      | 13.779762 |
Error rate per size
kable(data.long %>% group_by(targetSize) %>% summarise(mean = mean(errorRate)))
| targetSize | mean     |
|:-----------|---------:|
| Large      | 5.240989 |
| Small      | 6.491402 |
Error rate per technique * density

| tech         | density | mean      |
|:-------------|:--------|----------:|
| 1_RayCasting | High    | 7.076720  |
| 2_RayCursor  | High    | 4.379960  |
| 3_SemiAuto   | High    | 3.224206  |
| 4_RoEtAl     | High    | 11.210318 |
| 1_RayCasting | Low     | 4.398148  |
| 2_RayCursor  | Low     | 2.430556  |
| 3_SemiAuto   | Low     | 1.645172  |
| 4_RoEtAl     | Low     | 12.564484 |
Preferences

| participantId | 1_RayCasting | 2_RayCursor | 3_SemiAuto | 4_RoEtAl |
|--------------:|-------------:|------------:|-----------:|---------:|
| 0             | 3            | 2           | 1          | 4        |
| 1             | 2            | 3           | 1          | 4        |
| 2             | 1            | 2           | 3          | 4        |
| 3             | 2            | 3           | 1          | 4        |
| 4             | 3            | 2           | 1          | 4        |
| 5             | 3            | 2           | 1          | 4        |
| 6             | 3            | 1           | 2          | 4        |
| 7             | 2            | 3           | 1          | 4        |
| 8             | 2            | 3           | 1          | 4        |
| 9             | 3            | 2           | 1          | 4        |
| 10            | 1            | 2           | 3          | 4        |
| 11            | 2            | 3           | 1          | 4        |
Median
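The medians below can be computed directly from the ranking matrix; a minimal sketch, assuming the rankings are stored in data_tr (the object passed to the Friedman test further down), with one row per participant and one column per technique:
# Assumption: data_tr has one row per participant and one column per technique.
meds <- apply(data.matrix(data_tr), 2, median)
kable(data.frame(tech = names(meds), median = meds), row.names = FALSE)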
| tech         | median |
|:-------------|-------:|
| 1_RayCasting | 2      |
| 2_RayCursor  | 2      |
| 3_SemiAuto   | 1      |
| 4_RoEtAl     | 4      |
Friedman
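The code chunk for this test is not echoed; given the output below, the call presumably had this form:
# Friedman rank sum test on the participant x technique ranking matrix.
friedman.test(data.matrix(data_tr))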
##
## Friedman rank sum test
##
## data: data.matrix(data_tr)
## Friedman chi-squared = 25.3, df = 3, p-value = 1.336e-05
Wilcoxon post-hoc analysis with Bonferroni correction
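The corresponding code chunk is not echoed either; a minimal sketch, assuming the rankings in data_tr are first melted to long format (the variable/value column names below are reshape2 defaults), mirroring the Bonferroni-corrected pairwise tests used earlier:
# Hypothetical sketch: paired Wilcoxon tests between techniques on the preference ranks,
# with Bonferroni correction ('variable'/'value' are the default melt() column names).
prefs.long <- melt(data_tr)
pw <- pairwise.wilcox.test(prefs.long$value, prefs.long$variable,
                           p.adj = "bonferroni", paired = TRUE)
kable(pw$p.value)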
|             | 1_RayCasting | 2_RayCursor | 3_SemiAuto |
|:------------|-------------:|------------:|-----------:|
| 2_RayCursor | 1.0000000    | NA          | NA         |
| 3_SemiAuto  | 0.0927879    | 0.0405306   | NA         |
| 4_RoEtAl    | 0.0000499    | 0.0000463   | 3.22e-05   |