## Title:   exp023 analysis
## Author:  Lloyd Chilcott
## Checked: Tina Seabrooke

## Setup ----

## Clear workspace
rm(list = ls())

## Load libraries
library(plyr)   # provides 'ddply'
library(dplyr)  # provides 'mutate' and the pipe '%>%' (used for the corrected hit rates below)
library(Rmisc)  # provides 'summarySE'
library(ez)     # provides 'ezANOVA'
library(tidyr)  # provides 'spread' and 'gather'

## Disable scientific notation
options(scipen = 999)

## Read data file for combined data (n = 137)
data <- read.csv("exp023-data.csv", stringsAsFactors = F)

###############################################################################
## Exclusions ----

# Exclude participants meeting the defined exclusion criteria
data2 <- data[data$english == 'Yes' &
                data$botcheckacc == '1' &
                data$cheat_check == 'No', ]

## Participants ----

# Participant info as one line per participant
ppts <- data2[!duplicated(data2$subject), 1:8]

print('Participant details')
ddply(ppts, 'exp', summarise,
      N      = length(subject),
      Female = sum(gender == 'Female'),
      Male   = sum(gender == 'Male'),
      Min    = min(age),
      Max    = max(age),
      Mean   = mean(age),
      SD     = sd(age),
      SEM    = sd(age) / sqrt(N))

###############################################################################
## Analysis of JoL Rating DV ----

## Percentage of missing JoLs (NA ratings) per participant

# Isolate the JoL encoding group and the encoding phase
JolOnly <- data2[data2$encodinggroup == 'JOL' & data2$running == 'Encoding', ]

# Mean percentage of missing JoLs per participant
jolsNA <- ddply(JolOnly, .(subject), summarise,
                N        = length(subject),
                totalNAs = sum(is.na(responses)),
                percNAs  = 100 * sum(is.na(responses)) / length(subject))
summarySE(jolsNA, measurevar = 'percNAs')

## Mean JoLs per condition (rhyming, semantic, unrelated) ----
JolOnly <- JolOnly[!is.na(JolOnly$responses), ]
JolOnly$responses <- as.numeric(JolOnly$responses)

meanJoLRatings <- ddply(JolOnly, c('subject', 'testgroup', 'condition'), summarise,
                        totalJoLs  = length(running),
                        JoLRatings = mean(responses))

## Total mean & SD JoL ratings per condition
# Mean collapsed across test group
summarySE(meanJoLRatings, measurevar = 'JoLRatings', groupvars = 'condition')

# Mean by test group
summarySE(meanJoLRatings, measurevar = 'JoLRatings',
          groupvars = c('testgroup', 'condition'))

Means <- summarySE(meanJoLRatings, measurevar = 'JoLRatings',
                   groupvars = c('testgroup', 'condition'))
write.csv(Means, 'means.csv', row.names = F)

## JoL Rating DV: Repeated Measures ANOVA ----

## Factorise columns for ezANOVA
cols <- c('subject', 'testgroup', 'condition')
meanJoLRatings[cols] <- lapply(meanJoLRatings[cols], factor)

# Mixed ANOVA on JoL rating (DV) with condition (within) and test group (between)
ezANOVA(
  data     = meanJoLRatings,
  wid      = subject,
  dv       = JoLRatings,
  within   = condition,
  between  = testgroup,
  detailed = T,
  type     = 3)

## Post-hoc paired t-tests for condition ----

# Reshape to wide data (one column per condition)
meanJoLRatings2     <- subset(meanJoLRatings, select = -totalJoLs)
meanJoLRatings2Wide <- spread(meanJoLRatings2, condition, JoLRatings)

# Semantic v rhyme word pairs
t.test(meanJoLRatings2Wide$Semantic, meanJoLRatings2Wide$Rhyme, paired = TRUE)

# Semantic v unrelated word pairs
t.test(meanJoLRatings2Wide$Semantic, meanJoLRatings2Wide$Unrelated, paired = TRUE)

# Rhyme v unrelated word pairs
t.test(meanJoLRatings2Wide$Rhyme, meanJoLRatings2Wide$Unrelated, paired = TRUE)
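## Optional sketch (not part of the original script): the three post-hoc
## p-values above could additionally be corrected for multiple comparisons,
## e.g. with a Holm adjustment via base R's p.adjust(). The object name
## 'jolPosthocs' is only an illustrative placeholder.
jolPosthocs <- list(
  semantic_v_rhyme     = t.test(meanJoLRatings2Wide$Semantic, meanJoLRatings2Wide$Rhyme,     paired = TRUE),
  semantic_v_unrelated = t.test(meanJoLRatings2Wide$Semantic, meanJoLRatings2Wide$Unrelated, paired = TRUE),
  rhyme_v_unrelated    = t.test(meanJoLRatings2Wide$Rhyme,    meanJoLRatings2Wide$Unrelated, paired = TRUE))
p.adjust(sapply(jolPosthocs, function(x) x$p.value), method = 'holm')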
###############################################################################
## Analysis of Performance Scores DV ----

## Calculation of the hit rate, false alarm rate & corrected hit rate
# Part of the set-up for analysing the performance scores

## Hit rate
targets <- data2[data2$running == 'Test' & data2$condition != 'Foil', ]
hitsLong <- ddply(targets, c('subject', 'encodinggroup', 'testgroup', 'condition'),
                  summarise,
                  totalTargets = length(running),
                  hits         = sum(acc),
                  hitRate      = 100 * (hits / totalTargets))
hitsLong2 <- subset(hitsLong, select = -c(totalTargets, hits))
hitsWide  <- spread(hitsLong2, condition, hitRate)

## False alarm rate
foils <- data2[data2$condition == 'Foil', ]
falseAlarms <- ddply(foils, c('subject', 'encodinggroup', 'testgroup', 'condition'),
                     summarise,
                     totalFoils       = length(running),
                     correctRejection = sum(acc),
                     falseAlarmRate   = 100 - 100 * (correctRejection / totalFoils))
falseAlarms2 <- subset(falseAlarms,
                       select = -c(totalFoils, correctRejection, condition))

## Corrected hit rate
# Merge hit rates and false alarm rates into a single data frame
HitsFalseAlarms <- merge(hitsWide, falseAlarms2)

# Corrected hit rate (hit rate minus false alarm rate) per condition,
# added to the data frame
correctedHitRate <- HitsFalseAlarms %>%
  mutate(rhymeCorHit     = Rhyme - falseAlarmRate,
         semanticCorHit  = Semantic - falseAlarmRate,
         unrelatedCorHit = Unrelated - falseAlarmRate)
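## Optional sanity check (a sketch added here, not part of the original
## analysis): there should be exactly one row per subject, and the corrected
## hit rates should fall between -100 and 100.
stopifnot(!any(duplicated(correctedHitRate$subject)))
summary(correctedHitRate[, c('rhymeCorHit', 'semanticCorHit', 'unrelatedCorHit')])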
################################################################################
## Analysis of Performance Scores DV ----

## Between-subjects ANOVA on the FALSE ALARMS:
# encoding group (JoL v NoJoL) x
# test group (Standard v Rhyme)
ezANOVA(
  data     = correctedHitRate,
  wid      = subject,
  dv       = falseAlarmRate,
  between  = .(testgroup, encodinggroup),
  detailed = T,
  type     = 3)

# Mean false alarm rates by test group
summarySE(correctedHitRate, measurevar = 'falseAlarmRate', groupvars = c('testgroup'))

################################################################################
### Mixed ANOVA on hit rates:
# encoding group (JoL v NoJoL) x
# encoding condition (rhyming v semantic v unrelated) x
# test group (Standard v Rhyme)
ezANOVA(
  data     = hitsLong2,
  wid      = subject,
  dv       = hitRate,
  within   = condition,
  between  = .(testgroup, encodinggroup),
  detailed = T,
  type     = 3)

# Mean hit rates by test group
summarySE(hitsLong2, measurevar = 'hitRate', groupvars = c('testgroup'))

## Post-hoc t-tests for condition (within) ----

# Rhyme v semantic word pairs
t.test(hitsWide$Rhyme, hitsWide$Semantic, paired = TRUE)

# Semantic v unrelated word pairs
t.test(hitsWide$Semantic, hitsWide$Unrelated, paired = TRUE)

# Rhyme v unrelated word pairs
t.test(hitsWide$Rhyme, hitsWide$Unrelated, paired = TRUE)

# Mean hit rates by condition
summarySE(hitsLong2, measurevar = 'hitRate', groupvars = c('condition'))

## Post-hoc t-tests for test group x condition ----

## Effect of condition within the standard test group
recognition <- hitsWide[hitsWide$testgroup == 'Recognition', ]

# Standard test group: Rhyme v semantic word pairs
t.test(recognition$Rhyme, recognition$Semantic, paired = TRUE)

# Standard test group: Rhyme v unrelated word pairs
t.test(recognition$Rhyme, recognition$Unrelated, paired = TRUE)

# Standard test group: Semantic v unrelated word pairs
t.test(recognition$Semantic, recognition$Unrelated, paired = TRUE)

# Mean hit rates by condition within the standard recognition group
recognitionLong <- hitsLong2[hitsLong2$testgroup == 'Recognition', ]
summarySE(recognitionLong, measurevar = 'hitRate', groupvars = 'condition')

## Effect of condition within the rhyme test group ----
rhyme <- hitsWide[hitsWide$testgroup == 'Rhyme', ]

# Rhyme test group: Rhyme v semantic word pairs
t.test(rhyme$Rhyme, rhyme$Semantic, paired = TRUE)

# Rhyme test group: Rhyme v unrelated word pairs
t.test(rhyme$Rhyme, rhyme$Unrelated, paired = TRUE)

# Rhyme test group: Semantic v unrelated word pairs
t.test(rhyme$Semantic, rhyme$Unrelated, paired = TRUE)

# Mean hit rates by condition within the rhyme test group
rhymeLong <- hitsLong2[hitsLong2$testgroup == 'Rhyme', ]
summarySE(rhymeLong, measurevar = 'hitRate', groupvars = 'condition')

################################################################################
### Mixed ANOVA on corrected hit rates:
# encoding group (JoL v NoJoL) x
# encoding condition (rhyming v semantic v unrelated) x
# test group (Standard v Rhyme)
correctedLong <- gather(correctedHitRate, condition, correctedHitRate,
                        rhymeCorHit:unrelatedCorHit, factor_key = TRUE)
ezANOVA(
  data     = correctedLong,
  wid      = subject,
  dv       = correctedHitRate,
  within   = condition,
  between  = .(testgroup, encodinggroup),
  detailed = T,
  type     = 3)

# Mean corrected hit rates by test group
summarySE(correctedLong, measurevar = 'correctedHitRate', groupvars = c('testgroup'))

## Post-hoc t-tests for condition (within) ----

# Rhyme v semantic word pairs
t.test(correctedHitRate$rhymeCorHit, correctedHitRate$semanticCorHit, paired = TRUE)

# Semantic v unrelated word pairs
t.test(correctedHitRate$semanticCorHit, correctedHitRate$unrelatedCorHit, paired = TRUE)

# Rhyme v unrelated word pairs
t.test(correctedHitRate$rhymeCorHit, correctedHitRate$unrelatedCorHit, paired = TRUE)

# Mean corrected hit rates by condition
summarySE(correctedLong, measurevar = 'correctedHitRate', groupvars = c('condition'))

## Post-hoc t-tests for test group x condition ----

## Effect of condition within the standard test group
corRecog <- correctedHitRate[correctedHitRate$testgroup == 'Recognition', ]

# Standard test group: Rhyme v semantic word pairs
t.test(corRecog$rhymeCorHit, corRecog$semanticCorHit, paired = TRUE)

# Standard test group: Rhyme v unrelated word pairs
t.test(corRecog$rhymeCorHit, corRecog$unrelatedCorHit, paired = TRUE)

# Standard test group: Semantic v unrelated word pairs
t.test(corRecog$semanticCorHit, corRecog$unrelatedCorHit, paired = TRUE)

# Mean corrected hit rates by condition within the standard recognition group
recCorLong <- correctedLong[correctedLong$testgroup == 'Recognition', ]
summarySE(recCorLong, measurevar = 'correctedHitRate', groupvars = 'condition')

## Effect of condition within the rhyme test group ----
corRhyme <- correctedHitRate[correctedHitRate$testgroup == 'Rhyme', ]

# Rhyme test group: Rhyme v semantic word pairs
t.test(corRhyme$rhymeCorHit, corRhyme$semanticCorHit, paired = TRUE)

# Rhyme test group: Rhyme v unrelated word pairs
t.test(corRhyme$rhymeCorHit, corRhyme$unrelatedCorHit, paired = TRUE)

# Rhyme test group: Semantic v unrelated word pairs
t.test(corRhyme$semanticCorHit, corRhyme$unrelatedCorHit, paired = TRUE)

# Mean corrected hit rates by condition within the rhyme test group
rhymeCorLong <- correctedLong[correctedLong$testgroup == 'Rhyme', ]
summarySE(rhymeCorLong, measurevar = 'correctedHitRate', groupvars = 'condition')
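## Optional (a sketch mirroring the 'means.csv' export above; the output file
## name is an assumption, not part of the original script): save the corrected
## hit rate summary by test group and condition, and record the session
## details used for this analysis.
correctedMeans <- summarySE(correctedLong, measurevar = 'correctedHitRate',
                            groupvars = c('testgroup', 'condition'))
write.csv(correctedMeans, 'corrected-hit-rate-means.csv', row.names = F)
sessionInfo()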