# Attachment 'CodingLectureRochester.R'
library(languageR)

################ Reads in Data #############################
d <- read.table("fakedata.txt", header = TRUE)

# Recode the numeric condition columns into descriptive labels.
d$IV1 <- ifelse(d$IV1 == 1, "silent", "noise")
d$IV2 <- ifelse(d$IV2 == 2, "word", ifelse(d$IV2 == 3, "legal", "illegal"))

# Condition factors. A single as.factor() call per column is sufficient
# (the original converted each column to a factor twice).
d$NoiseCond <- as.factor(d$IV1)
d$WordCond <- as.factor(d$IV2)
d$Freq <- as.numeric(d$Freq)

## Below lets you look at condition means for the fake data set.
## Columns are referenced through `d` directly rather than via attach()
## (attach() pollutes the search path and can silently mask other objects).
RTmeans <- aggregate(d$RT, list(WordCond = d$WordCond), FUN = mean)
RTmeansWN <- aggregate(d$RT, list(WordCond = d$WordCond, NoiseCond = d$NoiseCond),
                       FUN = mean)
########### Sets up Treatment Coding ############################

d$WordCond.Treatment <- d$WordCond

# R automatically assigns factor levels alphabetically; this isn't always what
# you want, so reorder the levels to put "word" in the baseline (1st) position.
d$WordCond.Treatment <- factor(d$WordCond.Treatment,
                               levels = c("word", "legal", "illegal"))

# R's default coding scheme is treatment coding, so nothing more is needed
# once the levels are ordered appropriately.

# More generally, to specify only which level is the baseline:
# contrasts(d$WordCond.Treatment) <- contr.treatment(3, base = 3)
# i.e. treatment coding with 3 levels, the 3rd level being the base condition.

# Linear mixed model with crossed random intercepts for Subject and Item.
lin.Treatment <- lmer(RT ~ WordCond.Treatment + (1|Subject) + (1|Item), data = d)

# Binomial (logistic) mixed model: generalized models are fit with glmer();
# passing family= to lmer() is an error in current versions of lme4.
log.Treatment <- glmer(Response ~ WordCond.Treatment + (1|Subject) + (1|Item),
                       data = d, family = "binomial")

### Another way to change the base condition that lets you see which levels
### are being tested:
lin.Treatment.relevel <- lmer(RT ~ relevel(WordCond, ref = "legal") +
                                (1|Subject) + (1|Item), data = d)

## This sets the base level to legal nonwords. Notice each coefficient differs
## here because the comparisons being tested differ (i.e., "word vs. legal",
## "illegal vs. legal").
########### Sets up Effects Coding ############################

d$WordCond.Effects <- d$WordCond

# To make the output more readable (easier to see what each Ci is doing),
# build the contrast matrix manually. The basic command for effects coding is
# contr.sum(n), where n = the number of levels of your factor.
# C1 = illegal vs. grand mean, C2 = legal vs. grand mean.
contrasts(d$WordCond.Effects) <- cbind("illegal.v.GM" = c(1, 0, -1),
                                       "legal.v.GM" = c(0, 1, -1))

# Same model fit as before, but the coefficient labels now describe the
# contrasts being tested.
lin.Effects <- lmer(RT ~ WordCond.Effects + (1|Subject) + (1|Item), data = d)

# glmer(), not lmer(family=...), for the binomial model (family= to lmer()
# is an error in current lme4).
log.Effects <- glmer(Response ~ WordCond.Effects + (1|Subject) + (1|Item),
                     data = d, family = "binomial")
########### Sets up Helmert Coding ############################

# R has a default command for Helmert contrast weights, contr.helmert();
# however, those weights don't yield directly interpretable coefficients,
# so we set them by hand: each Ci equals the difference between the means
# being tested.
d$WordCond.Helm.Reg <- d$WordCond

# C1 = illegal vs. legal, C2 = word vs. nonwords (mean of the other two levels)
contrasts(d$WordCond.Helm.Reg) <- cbind("leg.vs.ill" = c(-.5, .5, 0),
                                        "word.vs.nons" = c(-(1/3), -(1/3), (2/3)))

# This model yields directly interpretable Bs: differences between conditions.
lin.Helm.Reg <- lmer(RT ~ WordCond.Helm.Reg + (1|Subject) + (1|Item), data = d)

# glmer(), not lmer(family=...), for the binomial model; Bs are again the
# (log-odds) differences between conditions.
log.Helm.Reg <- glmer(Response ~ WordCond.Helm.Reg + (1|Subject) + (1|Item),
                      data = d, family = "binomial")
########### Sets up Polynomial Coding ############################

d$WordCond.Poly <- d$WordCond

# Polynomial (orthogonal) coding: with a 3-level factor this tests the linear
# and quadratic components. The interpretation of the Cis differs from the
# schemes above because we are no longer testing differences among group means.
contrasts(d$WordCond.Poly) <- contr.poly(3)

lin.Poly <- lmer(RT ~ WordCond.Poly + (1|Subject) + (1|Item), data = d)

# The output labels the linear component .L and the quadratic component .Q.
# A significant .L coefficient means the regression requires a linear
# component; a significant .Q coefficient means the effect has a quadratic
# component.
######### Rsquare comparisons #######################

# Squared correlation between the observed RTs and a model's fitted values.
rsq_against_RT <- function(model) cor(d$RT, fitted(model))^2

rsq.Treatment <- rsq_against_RT(lin.Treatment)
rsq.Effects <- rsq_against_RT(lin.Effects)
rsq.Helm <- rsq_against_RT(lin.Helm.Reg)
rsq.Poly <- rsq_against_RT(lin.Poly)
############### CATEGORICAL INTERACTION ##########################
# These are linear models.

# d$NoiseCond keeps R's default (treatment) coding.
l.Treatment.int <- lmer(RT ~ NoiseCond * WordCond.Treatment +
                          (1|Subject) + (1|Item), data = d)

############### CATEGORICAL X CONTINUOUS INTERACTION ##############

l.Treatment.cont <- lmer(RT ~ Freq * WordCond.Treatment +
                           (1|Subject) + (1|Item), data = d)
#### Centered frequency variable

d$c.Freq <- d$Freq - mean(d$Freq)

l.Treatment.cont.c <- lmer(RT ~ c.Freq * WordCond.Treatment +
                             (1|Subject) + (1|Item), data = d)

# p-values via languageR. NOTE(review): pvals.fnc() relies on MCMC sampling
# that was removed from recent versions of lme4 — confirm it still runs in
# your installation (lmerTest or confint() are current alternatives).
p.Treatment.cont <- pvals.fnc(l.Treatment.cont)
p.Treatment.cont.c <- pvals.fnc(l.Treatment.cont.c)

# R-squared for the uncentered vs. centered frequency models. These get
# distinct names: the original reused `rsq.Treatment`, silently overwriting
# the treatment-coding R^2 computed in the comparison section above.
rsq.Treatment.cont <- cor(d$RT, fitted(l.Treatment.cont))^2
rsq.Treatment.cont.c <- cor(d$RT, fitted(l.Treatment.cont.c))^2
######### Non-Orthogonal Coding and Collinearity ################

## Set up coding schemes by hand: new indicator variables equivalent to
## treatment coding with "word" as the baseline.
d$Legal.v.Word <- ifelse(d$WordCond == "legal", 1, 0)
d$Illegal.v.Word <- ifelse(d$WordCond == "illegal", 1, 0)

# These models are compared with anova() below, so fit them by maximum
# likelihood (REML = FALSE): likelihood-ratio tests of models differing in
# their FIXED effects are only valid for ML fits (current lme4 would
# otherwise refit with ML inside anova() and warn).
nonOrth <- lmer(RT ~ Legal.v.Word + Illegal.v.Word + (1|Subject) + (1|Item),
                data = d, REML = FALSE)

# Compare nonOrth to the earlier model we ran with treatment coding using
# contr.treatment() and "word" as our baseline.
summary(lin.Treatment)

## Run models excluding one of the coding predictors at a time; model
## comparison then shows whether the predictor representing that comparison
## significantly improves model fit.
nonOrth1 <- lmer(RT ~ Legal.v.Word + (1|Subject) + (1|Item),
                 data = d, REML = FALSE)
nonOrth2 <- lmer(RT ~ Illegal.v.Word + (1|Subject) + (1|Item),
                 data = d, REML = FALSE)

anova(nonOrth, nonOrth1)
anova(nonOrth, nonOrth2)

### So what should we determine here?
## More complex example of setting up a model by hand with the interaction of
## two categorical variables.
d$Silent.v.Noise <- ifelse(d$NoiseCond == "silent", 1, 0)

### Full model (compare to l.Treatment.int). REML = FALSE so the
### likelihood-ratio comparisons below are valid for fixed-effect differences.
nonOrthInt <- lmer(RT ~ Silent.v.Noise + Legal.v.Word + Illegal.v.Word +
                     Silent.v.Noise:Legal.v.Word +
                     Silent.v.Noise:Illegal.v.Word +
                     (1|Subject) + (1|Item), data = d, REML = FALSE)

### Reduced models removing each of the interaction terms in turn.
nonOrthInt2 <- lmer(RT ~ Silent.v.Noise + Legal.v.Word + Illegal.v.Word +
                      Silent.v.Noise:Legal.v.Word +
                      (1|Subject) + (1|Item), data = d, REML = FALSE)

nonOrthInt3 <- lmer(RT ~ Silent.v.Noise + Legal.v.Word + Illegal.v.Word +
                      Silent.v.Noise:Illegal.v.Word +
                      (1|Subject) + (1|Item), data = d, REML = FALSE)

### Compares the fully crossed model to the reduced models.
anova(nonOrthInt, nonOrthInt2)
anova(nonOrthInt, nonOrthInt3)
# Attached Files
# To refer to attachments on a page, use attachment:filename, as shown below
# in the list of files. Do NOT use the URL of the [get] link, since this is
# subject to change and can break easily. You are not allowed to attach a file
# to this page.