S2 File. Scripts of R functions.
1. R code to compute Optimal Simon design
#######
### Optimal.Simon.Plan allows one to compute an optimal two-stage Simon plan
### ARGUMENTS :
### Here we denote
# pi0 :   the minimum expected efficacy of the treatment
# pi1 :   the desirable target level of treatment efficacy
# alpha : the type I error rate
# beta :  the type II error rate
# Nmax :  the maximum number of patients to be included in the Phase II trial
### VALUES :
# a list :
# $risk :  the final Simon optimal design
# $j :     stage
# $n :     the number of patients included at stage j (N1, N2)
# $r :     the stopping boundaries at stage j (r1, r2)
# $alpha : the spending function of the type I error rate
# $beta :  the spending function of the type II error rate
# $pet0 :  probability of early termination under the null hypothesis
# $pet1 :  probability of early termination under the alternative hypothesis
# $design: all the selected designs satisfying A(pi0) < alpha and A(pi1) > 1 - beta
Optimal.Simon.Plan<-function(pi0, pi1, alpha, beta, Nmax) {
  # Candidate stopping boundaries (r1, r2) and stage sample sizes (n1, n2)
  r12<-expand.grid(list(r1=0:Nmax, r2=0:Nmax))
  n12<-r12; names(n12)<-c("n1", "n2")
  n12<-n12[n12$n1+n12$n2<=Nmax & n12$n1>0 & n12$n2>0,]
  r12<-r12[r12$r1<r12$r2,]
  id<-0
  N<-Nmax
  enh0C<-Nmax
  # Explore the candidate sample sizes in random order
  n12<-n12[order(runif(dim(n12)[1])),]
  n<-dim(n12)[1]
  nid<-0
  while (n>0) {
    n1<-n12$n1[1]
    n2<-n12$n2[1]
    # Joint binomial probabilities of (k1, k2) responses under pi0 and pi1
    pi12<-expand.grid(list(k1=0:n1, k2=0:n2))
    pi12$pk1.0<-dbinom(pi12$k1, n1, pi0)
    pi12$pk2.0<-dbinom(pi12$k2, n2, pi0)
    pi12$pk1.1<-dbinom(pi12$k1, n1, pi1)
    pi12$pk2.1<-dbinom(pi12$k2, n2, pi1)
    pi12$pk12.0<-pi12$pk1.0*pi12$pk2.0
    pi12$pk12.1<-pi12$pk1.1*pi12$pk2.1
    # Boundaries compatible with (n1, n2), visited in random order
    r12c<-r12[r12$r1<n1 & r12$r2<n1+n2,]
    r12c<-r12c[order(runif(dim(r12c)[1])),]
    r12c$beta1<-pbinom(r12c$r1, n1, pi1)
    r12c<-r12c[r12c$beta1<beta,]
    r12c$pet1<-pbinom(r12c$r1, n1, pi0)
    # Expected sample size under H0; keep only candidates at least as good as the best so far
    r12c$enh0C<-n1+(1-r12c$pet1)*n2
    r12c<-r12c[r12c$enh0C<=enh0C,]
    nr12c<-dim(r12c)[1]
    while (nr12c>0) {
      r1<-r12c$r1[1]
      r2<-r12c$r2[1]
      beta1<-r12c$beta1[1]
      enh0<-r12c$enh0C[1]
      ind<-(pi12$k1>r1) & (pi12$k2>r2-pi12$k1)
      alpha2<-sum(pi12$pk12.0[ind])
      ind<-(pi12$k1>r1) & (pi12$k2<=r2-pi12$k1)
      beta2<-sum(pi12$pk12.1[ind])
      if (alpha2<=alpha & beta1+beta2<=beta) {
        if (enh0<enh0C) {
          id<-id+1
          enh0C<-enh0
          N<-n1+n2
          d<-c(n1, n2, r1, r2, 0, beta1, alpha2, beta2, enh0, N)
          if (id==1) {
            design<-d
          } else {
            design<-rbind(design, d)
          }
        } else {
          if (n1+n2<N) {
            id<-id+1
            N<-n1+n2
            d<-c(n1, n2, r1, r2, 0, beta1, alpha2, beta2, enh0, N)
            if (id==1) {
              design<-d
            } else {
              design<-rbind(design, d)
            }
          }
        }
        r12c<-r12c[-1,]; nr12c<-nr12c-1
      } else {
        # Prune boundaries that cannot satisfy the violated constraint
        if (alpha2>alpha) {
          r12c<-r12c[r12c$r1!=r1 | (r12c$r1==r1 & r12c$r2>r2),]
        }
        if (beta1+beta2>beta) {
          r12c<-r12c[r12c$r1!=r1 | (r12c$r1==r1 & r12c$r2<r2),]
        }
        nr12c<-dim(r12c)[1]
      }
    }
    if (id>nid) {
      n12<-n12[n12$n1<=floor(enh0C),]
      n<-dim(n12)[1]
      nid<-id
    } else {
      n12<-n12[-1,]; n<-n-1
    }
  }
  risk<-NULL
  if (id>0) {
    d<-design[id,]
    design<-data.frame(design)
    names(design)<-c("n1", "n2", "r1", "r2", "alpha1", "beta1", "alpha2",
                     "beta2", "EN", "n_cum")
    # Loading the Calculation.Simon.Risk function is required
    risk<-Calculation.Simon.Risk(d[1], d[3], d[2], d[4], pi0, pi1)
  } else {
    stop("No design found")
  }
  res<-list(risk=risk, design=design)
  return(res)
}
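For illustration, a minimal call is sketched below; the operating characteristics (pi0 = 0.2, pi1 = 0.4, alpha = 0.05, beta = 0.2, Nmax = 50) are assumed values, not taken from the article. Calculation.Simon.Risk, defined next, must already be loaded, and a seed is set because candidate designs are visited in random order.

# Illustrative call (assumed parameter values)
set.seed(42)   # the search order is randomized
plan<-Optimal.Simon.Plan(pi0=0.2, pi1=0.4, alpha=0.05, beta=0.2, Nmax=50)
plan$risk              # stage-wise risks of the retained design
tail(plan$design, 1)   # the last (optimal) quadruplet found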
#######
### Calculation.Simon.Risk allows one to calculate the Simon risks for a given
### quadruplet (n1, r1, n2, r2) and given pi0 and pi1
### ARGUMENTS :
# n1 :  the number of patients included at stage 1
# n2 :  the additional number of patients included at stage 2
# r1 :  the stopping boundary at stage 1
# r2 :  the stopping boundary at stage 2
# pi0 : the minimum expected efficacy of the treatment
# pi1 : the desirable target level of treatment efficacy
### VALUES :
# a data frame :
# $j :     stage
# $n :     the number of patients included at stage j (N1, N2)
# $r :     the stopping boundaries at stage j (r1, r2)
# $alpha : the spending function of the type I error rate
# $beta :  the spending function of the type II error rate
# $pet0 :  probability of early termination under the null hypothesis
# $pet1 :  probability of early termination under the alternative hypothesis
Calculation.Simon.Risk<-function(n1, r1, n2, r2, pi0, pi1) {
  k<-(r1+1):n1
  pk1<-dbinom(k, n1, pi1)
  pk0<-dbinom(k, n1, pi0)
  beta1<-pbinom(r1, n1, pi1)
  beta2<-sum(pk1*pbinom(r2-k, n2, pi1))
  alpha2<-sum(pk0*(1-pbinom(r2-k, n2, pi0)))
  pet01<-pbinom(r1, n1, pi0)
  pet02a<-sum(pk0*pbinom(r2-k, n2, pi0))
  pet02b<-sum(pk0*(1-pbinom(r2-k, n2, pi0)))
  pet11<-pbinom(r1, n1, pi1)
  pet12a<-sum(pk1*pbinom(r2-k, n2, pi1))
  pet12b<-sum(pk1*(1-pbinom(r2-k, n2, pi1)))
  res<-data.frame(j=1:2, n=c(n1, n2+n1), r=c(r1, r2), alpha=c(0, alpha2),
                  alphacum=c(0, alpha2), beta=c(beta1, beta2),
                  betacum=cumsum(c(beta1, beta2)))
  res$pet0<-c(pet01, pet02a+pet02b)
  res$pet1<-c(pet11, pet12a+pet12b)
  return(res)
}
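A direct call on an assumed quadruplet (the values below are purely illustrative):

risk<-Calculation.Simon.Risk(n1=13, r1=3, n2=30, r2=12, pi0=0.2, pi1=0.4)
risk   # one row per stage: cumulative n, boundary r, error spending, PET under H0/H1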
2. R code to simulate data
##########################
####### Simul.data : function which allows one to simulate data
### ARGUMENTS :
# N :        total number of patients to simulate
# pi :       the theoretical response rate
# theta :    the rate of unevaluable patients
# t0 :       the evaluation time point
# Tdistrib : the distribution of the latent failure times T;
#            "W" corresponds to a Weibull distribution
#            "E" corresponds to an Exponential distribution
#            "L" corresponds to a Log-logistic distribution
# Cdistrib : the distribution of the censoring times C;
#            "U" corresponds to a Uniform distribution
#            "E" corresponds to an Exponential distribution
### VALUES :
# a data frame
# $time : the time between inclusion and response evaluation or last known contact
# $X :    a dummy variable which indicates whether the patient responds to the
#         therapy (X=1), does not respond (X=0) or is unevaluable (X=NA)
Simul.data<-function(N=100, pi=0.2, theta=0, t0=1, Tdistrib=c("W", "L", "E"),
                     Cdistrib=c("U", "E")){
  Tdistrib<-Tdistrib[1]
  Cdistrib<-Cdistrib[1]
  # Define the cumulative probability and the random generation for the C distribution
  if(Cdistrib=="U"){
    G<-function(c, C){ifelse(c<C, c/C, 1)}
    rg<-function(n, C){runif(n, 0, C)}
  }
  if(Cdistrib=="E"){
    G<-function(c, C){1-exp(-C*c)}
    rg<-function(n, C){rexp(n, C)}
  }
  # Define the density and the random generation for the specified T distribution
  if(Tdistrib=="L"){
    scale<-2
    shape<-t0*((pi/(1-pi))^(1/scale))
    f<-function(t, shape, scale){
      y<-(t/shape)^scale
      y<-(y/((1+y)^2))*(scale/t)
      return(y)
    }
    rf<-function(n, shape, scale){
      U<-runif(n, 0, 1)
      return(((U/(1-U))^(1/scale))*shape)
    }
  }
  if(Tdistrib=="W"){
    scale<-2
    shape<-t0*((-log(pi))^(-1/scale))
    f<-function(t, shape, scale){
      y<-dweibull(t, shape=scale, scale=shape)
      return(y)
    }
    rf<-function(n, shape, scale){rweibull(n, shape=scale, scale=shape)}
  }
  if(Tdistrib=="E"){
    shape<-(-log(pi))/t0
    scale<-1
    f<-function(t, shape, scale){
      y<-dexp(t, rate=shape)
      return(y)
    }
    rf<-function(n, shape, scale){rexp(n, rate=shape)}
  }
  # If there are no unevaluable patients, do not introduce censoring
  if(theta==0){
    lambda<-Inf
  }else{
    # Determine the lambda parameter (requires the PdV function of section 3)
    fct<-function(C, t0, shape, scale, theta){PdV(C, t0, shape, scale, f, G)-theta}
    lambda<-uniroot(fct, interval=c(0.01, 1000), t0=t0, shape=shape,
                    scale=scale, theta=theta)$root
  }
  # Random generation
  T<-rf(N, shape, scale)
  if(is.infinite(lambda)){
    C<-rep(Inf, N)   # no censoring
  }else{
    C<-rg(N, lambda)
  }
  # Calculate the observation time
  time<-pmin(T, C)
  # Classify each simulated patient as responder (X=1), non-responder (X=0)
  # or unevaluable (X=NA)
  X<-ifelse(T>t0, 1, 0)
  X<-ifelse(C<T & C<t0, NA, X)
  data<-data.frame(time=time, X=X)
  return(data)
}
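An illustrative simulation run (all parameter values are assumed; when theta > 0 the PdV function of section 3 must already be loaded, since it calibrates the censoring parameter):

set.seed(1)
dat<-Simul.data(N=40, pi=0.2, theta=0.1, t0=1, Tdistrib="W", Cdistrib="U")
table(dat$X, exclude=NULL)   # responders (1), non-responders (0), unevaluable (NA)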
#############
#### analyze_data : counts the number of unevaluable patients and the actuarial
#### survival at l.t0 and t0 at each stage
### ARGUMENTS :
# N1 :   number of patients included at the first stage
# N2 :   number of patients included at the second stage
# data : a data frame with
#   data$time : the time from inclusion to response evaluation
#   data$X :    a dummy variable with X=1: response, X=0: non-response, X=NA: unevaluable
# t0 :   the evaluation time point
# l :    the time ratio (as described in the article)
### VALUES :
# a list
# $stage1 : results at stage 1
#   $Z1 : number of unevaluable patients at stage 1
#   $AC : actuarial survival estimated at stage 1
#   $S1 : the number of responses at stage 1 among the N1-Z1 evaluable patients
# $stage2 : results at stage 2
#   $Z2 : number of unevaluable patients at stage 2
#   $AC : actuarial survival estimated at stage 2
#   $S2 : the number of responses at stage 2 among the N2-Z2 evaluable patients
analyze_data<-function(N1, N2, data, t0, l=NULL){
  # Select the appropriate number of patients to analyze
  N2<-min(N2, dim(data)[1])
  data1<-data[1:N1,]
  data2<-data[1:N2,]
  # Count the unevaluable patients (X=NA) and the responses (X=1)
  Z1<-sum(is.na(data1$X))
  Z2<-sum(is.na(data2$X))
  S1<-sum(data1$X==1, na.rm=TRUE)
  S2<-sum(data2$X==1, na.rm=TRUE)
  # Estimate the actuarial survival at l.t0 and t0
  if(is.null(l)){
    AC1<-NULL
    AC2<-NULL
  } else {
    AC1<-AC.fct(data1$time, data1$X, t0, l)$Scum
    AC2<-AC.fct(data2$time, data2$X, t0, l)$Scum
  }
  res1<-list(Z1=Z1, AC=AC1, S1=S1)
  res2<-list(Z2=Z2, AC=AC2, S2=S2)
  res<-list(stage1=res1, stage2=res2)
  return(res)
}
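Continuing the simulated data set dat from above (the stage sizes and time ratio are assumed; AC.fct, defined next, must be loaded whenever l is supplied):

out<-analyze_data(N1=13, N2=40, data=dat, t0=1, l=1/2)
out$stage1   # Z1, actuarial survival at l*t0 and t0, and S1
out$stage2   # Z2, actuarial survival, and S2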
#############
#### AC.fct : function which estimates the actuarial survival rate at l.t0 and t0
### ARGUMENTS :
# time :     time from inclusion to response evaluation
# response : response=1: responder, response=0: non-responder, response=NA: unevaluable patient
# t0 :       the evaluation time point
# l :        the time ratio (as described in the article)
### VALUES :
# a data frame
# $ti :     beginning of the time interval
# $t(i+1) : end of the time interval
# $ci :     number of censored subjects
# $di :     number of events (1-response)
# $ni :     number of patients at risk during the interval (ti, t(i+1))
# $Scond :  conditional survival during the interval (ti, t(i+1))
# $Scum :   actuarial survival estimated at t(i+1)
AC.fct<-function(time, response, t0, l) {
  event<-ifelse(is.na(response), 0, 1-response)
  inter<-unique(c(0, l*t0, t0))
  n<-length(time)
  # Define the time intervals; two dummy observations guarantee that both
  # event columns (0 and 1) are present in the table
  i<-cut(c(-1, -1, time), c(-1, inter, Inf), right=FALSE)
  x<-table(i, c(0, 1, event))
  x<-x[-1,]   # drop the dummy interval
  tab<-data.frame(ti=inter)
  tab$"t(i+1)"<-c(inter[-1], "Inf")
  # Number of censored subjects
  tab$ci<-x[,1]
  # Number of events
  tab$di<-x[,2]
  # Patients at risk
  tab$ni<-c(n, n-cumsum(tab$ci+tab$di)[-dim(tab)[1]])
  # Conditional survival
  tab$Scond<-(tab$ni-tab$ci*0.5-tab$di)/(tab$ni-tab$ci*0.5)
  tab$Scond<-ifelse(is.na(tab$Scond), 1, tab$Scond)
  # Cumulative survival
  tab$Scum<-cumprod(tab$Scond)
  tab<-tab[-dim(tab)[1],]
  return(tab)
}
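A standalone sketch of the life-table output on a toy data set (all values assumed):

t<-c(0.2, 0.4, 0.7, 1.0, 1.0, 1.0)
x<-c(0, NA, 0, 1, 1, 0)                  # 0=non-response, 1=response, NA=unevaluable
AC.fct(time=t, response=x, t0=1, l=1/2)  # life table over [0, 0.5) and [0.5, 1)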
3. R code for the rescue strategy when Z1 and Z2 unevaluable patients appear
#######
#### PdV : calculates the probability of being unevaluable at t0 when f and G are specified
### ARGUMENTS :
# C :     the censoring distribution parameter
# t0 :    time point for therapeutic evaluation
# shape : the shape parameter of the distribution of T
# scale : the scale parameter of the distribution of T
# f :     the probability density function of T
# G :     the cumulative distribution function of C
### VALUES : the probability of being unevaluable at t0
PdV<-function(C, t0, shape, scale, f, G){
  f1<-function(t, C, shape, scale){f(t, shape, scale)*G(t, C)}
  f2<-function(t, t0, C, shape, scale){f(t, shape, scale)*G(t0, C)}
  I1<-integrate(f1, 0, t0, shape=shape, scale=scale, C=C)$value
  I2<-integrate(f2, t0, Inf, shape=shape, scale=scale, C=C, t0=t0)$value
  I<-I1+I2
  return(I)
}
###############################################
##### tau.fct : calculates the probability of not responding to the therapy
##### and being evaluable at t0
### ARGUMENTS :
# C :     the censoring distribution parameter
# t0 :    time point for therapeutic evaluation
# shape : the shape parameter of the distribution of T
# scale : the scale parameter of the distribution of T
# F, G :  the cumulative distribution functions of T and C respectively
# g :     the probability density function of C
### VALUES : the probability of not responding to the therapy and being evaluable at t0
tau.fct<-function(C, t0, shape, scale, F, G, g){
  f1<-function(c, C, shape, scale){F(c, shape=shape, scale=scale)*g(c, C=C)}
  I1<-integrate(f1, 0, t0, shape=shape, scale=scale, C=C)$value
  tau<-I1+F(t0, shape=shape, scale=scale)*(1-G(t0, C=C))
  return(tau)
}
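A numerical sanity check under assumed inputs: an exponential T calibrated so that P(T > t0) = pi = 0.2, and uniform censoring on (0, 2); f, F, G and g mirror the definitions used in Simul.data and Adapt. The unevaluable, evaluable non-response and evaluable response probabilities should sum to 1.

f<-function(t, shape, scale){dexp(t, rate=shape)}
F<-function(t, shape, scale){pexp(t, rate=shape)}
G<-function(c, C){punif(c, 0, C)}
g<-function(c, C){dunif(c, 0, C)}
shape<-(-log(0.2))/1   # exponential rate giving P(T > 1) = 0.2
pdv<-PdV(C=2, t0=1, shape=shape, scale=1, f=f, G=G)
tau<-tau.fct(C=2, t0=1, shape=shape, scale=1, F=F, G=G, g=g)
pdv+tau+0.2*(1-G(1, C=2))   # should equal 1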
##### Calculate the type I error rate
### ARGUMENTS
# r : vector of the stopping boundaries at stages 1 and 2
# n : vector of the numbers of patients included at stages 1 and 2
# p : the treatment efficacy
### VALUES : probability of rejecting the null hypothesis when the treatment efficacy is p
binom_alpha<-function(r, n=c(10, 10), p=0.2){
  sum(dbinom((r[1]+1):n[1], n[1], p) * (1-pbinom(r[2]-((r[1]+1):n[1]), n[2], p)))
}
##### Calculate the type II error rate at the second stage
### ARGUMENTS
# r : vector of the stopping boundaries at stages 1 and 2
# n : vector of the numbers of patients included at stages 1 and 2
# p : the treatment efficacy
### VALUES : probability of continuing to stage 2 and not rejecting the null
### hypothesis when the treatment efficacy is p (the stage-1 stopping
### probability pbinom(r[1], n[1], p) must be added to obtain the overall
### type II error rate)
binom_beta<-function(r, n=c(10, 10), p=0.2){
  sum(dbinom((r[1]+1):n[1], n[1], p) * pbinom(r[2]-((r[1]+1):n[1]), n[2], p))
}
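Illustrative calls with an assumed quadruplet; note that for any p the two quantities sum to the probability of passing the first stage.

r<-c(3, 12); n<-c(13, 30)   # assumed boundaries and stage sizes
binom_alpha(r, n, p=0.2)    # type I error rate under pi0 = 0.2
binom_beta(r, n, p=0.4)     # stage-2 contribution to the type II error rate under pi1 = 0.4
binom_alpha(r, n, p=0.4)+binom_beta(r, n, p=0.4)   # equals 1-pbinom(3, 13, 0.4)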
#####
#### Adapt : furnishes the adapted design, denoted (N1, a1, N2, a2)AD as described in the article
### ARGUMENTS :
# Z1 :    the number of unevaluable patients observed at stage 1
# Z2 :    the number of unevaluable patients observed at stage 2; if stage 2 is
#         not completed yet, Z2 is equal to Z1
# N1 :    the planned sample size at stage 1
# N2 :    the planned sample size at stage 2
# pi0 :   the minimum expected efficacy of the treatment
# pi1 :   the desirable target level of treatment efficacy
# alpha : the type I error rate
# beta :  the type II error rate
# bound : for the stage 2 adaptation, bound is the stopping boundary used at the first stage
# l :     the time ratio (as described in the article)
# AC :    vector of actuarial estimates of the response rate at l.t0 and t0
#         (if AC is NULL an exponential distribution is assumed, otherwise a
#         Weibull distribution is assumed)
# error_fct : the error rate function
#   phi1 : respects the ratio between alpha and beta, increasing both the type I
#          and type II error rates
#   phi2 : preserves the type I error rate and increases the type II error rate
### VALUES
# a list object
# $param : the design parameters used to establish the adapted design
#   pi0a :  the pi0* probability of response when being evaluable at t0 under H0
#   pi1a :  the pi1* probability of response when being evaluable at t0 under H1
#   alpha : the initial type I error rate
#   beta :  the initial type II error rate
#   Nmax :  the maximal number of patients to be included
#   Z1 :    the number of unevaluable patients at the first stage
#   Z2 :    the number of unevaluable patients at the second stage
# $desAD : the adapted design defined by the adapted quadruplet and the final
#          type I and type II error rates
#   N1AD :    number of evaluable patients at stage 1
#   r1AD :    the stopping boundary at the first stage with Z1 unevaluable patients
#   N2AD :    number of evaluable patients at stage 2
#   r2AD :    the stopping boundary at the second stage with Z2 unevaluable patients
#   alphaAD : the final type I error rate of the adapted design
#   betaAD :  the final type II error rate of the adapted design
Adapt<-function(Z1, Z2=Z1, N1, N2, pi0, pi1, alpha, beta, t0, bound=NULL,
                l=NULL, AC=NULL, error_fct=c("phi1", "phi2")){
  ## Notation: gamma = scale, h00 = shape0, h01 = shape1
  error_fct<-error_fct[1]
  Z2<-max(Z1, Z2)
  if(is.null(bound)){
    bound<-c(N1, N2)
  }else{
    if(length(bound)==1){
      bound<-c(bound, N2)
    }
  }
  # Adaptation at stage 1
  if(Z2-Z1==0){
    n1<-N1-Z1
    n2<-N2-N1
    theta<-Z1/N1
    a<-expand.grid(list(r1=0:n1, r2=0:n2))
  }
  # Adaptation at stage 2
  if (Z2-Z1>0){
    n1<-N1-Z1
    n2<-N2-N1-(Z2-Z1)
    theta<-Z2/N2
    a<-expand.grid(list(r1=bound[1], r2=0:n2))
  }
  G<-function(c, C){punif(c, 0, C)}
  g<-function(c, C){dunif(c, 0, C)}
  # Weibull adaptation
  if(length(AC)==2){
    f<-function(t, shape, scale){dweibull(t, shape=scale, scale=shape)}
    F<-function(t, shape, scale){pweibull(t, shape=scale, scale=shape)}
    # Find the Weibull parameters (h0 and gamma: shape and scale respectively)
    scale<-log(log(AC[2])/log(AC[1]))/(-log(l))
    shape0<-t0/(-log(pi0))^(1/scale)
    shape1<-t0/(-log(pi1))^(1/scale)
    # Find the uniform parameter (lambda)
    fct<-function(C, t0, shape, scale, theta){PdV(C, t0, shape, scale, f, G)-theta}
    lambda0<-uniroot(fct, interval=c(0.01, 1000), t0=t0, shape=shape0,
                     scale=scale, theta=theta)$root
    lambda1<-uniroot(fct, interval=c(0.01, 1000), t0=t0, shape=shape1,
                     scale=scale, theta=theta)$root
    # Calculate the adapted pi0 and pi1 : equation 5
    pi1a<-1-(tau.fct(lambda1, t0, shape1, scale, F, G, g)/(1-theta))
    pi0a<-1-(tau.fct(lambda0, t0, shape0, scale, F, G, g)/(1-theta))
  }
  # Exponential adaptation
  if(is.null(AC)){
    f<-function(t, shape, scale){dexp(t, rate=shape)}
    F<-function(t, shape, scale){1-exp(-(t*shape))}
    # Find the exponential parameters (shape and scale)
    scale<-1
    shape0<-(-log(pi0))/t0
    shape1<-(-log(pi1))/t0
    # Find the uniform parameter (lambda)
    fct<-function(C, t0, shape, scale, theta){PdV(C, t0, shape, scale, f, G)-theta}
    lambda0<-uniroot(fct, interval=c(0.01, 1000), t0=t0, shape=shape0,
                     scale=scale, theta=theta)$root
    lambda1<-uniroot(fct, interval=c(0.01, 1000), t0=t0, shape=shape1,
                     scale=scale, theta=theta)$root
    # Calculate the adapted pi0 and pi1 : equation 5
    pi1a<-1-(tau.fct(lambda1, t0, shape1, scale, F, G, g)/(1-theta))
    pi0a<-1-(tau.fct(lambda0, t0, shape0, scale, F, G, g)/(1-theta))
  }
  #### Determination of the new stopping boundaries
  a$r2<-a$r1+a$r2
  a<-a[a$r2<=bound[2] & a$r1<=bound[1],]
  a$alpha1<-0
  a$beta1<-pbinom(a$r1, n1, pi1a)
  a$alpha2<-apply(a[, c(1, 2)], 1, binom_alpha, n=c(n1, n2), p=pi0a)
  a$beta2<-apply(a[, c(1, 2)], 1, binom_beta, n=c(n1, n2), p=pi1a)
  a$enho<-n1+(1-pbinom(a$r1, n1, pi0a))*n2
  a$alpha<-a$alpha1+a$alpha2
  a$beta<-a$beta1+a$beta2
  # Error rate function to control the type I and type II error rates
  if(error_fct=="phi1"){
    coef<-beta/alpha
    a$alphacout<-a$beta/coef
    a$ma<-pmax(a$alpha, a$alphacout)
    a$ma[a$ma<alpha]<-2
    a<-a[order(a$ma, a$enho),]   # minimize the increase in error rates, then E(N|H0)
    desAD<-data.frame(N1AD=N1-Z1, r1AD=a$r1[1], N2AD=N2-Z2, r2AD=a$r2[1],
                      alphaAD=a$ma[1], betaAD=a$ma[1]*coef)
  }
  if(error_fct=="phi2"){
    a<-a[a$alpha<=alpha,]
    a$beta[a$beta<beta]<-2
    a<-a[order(a$beta, a$enho),]   # minimize the increase in error rates, then E(N|H0)
    desAD<-data.frame(N1AD=N1-Z1, r1AD=a$r1[1], N2AD=N2-Z2, r2AD=a$r2[1],
                      alphaAD=a$alpha[1], betaAD=a$beta[1])
  }
  param<-data.frame(pi0a=pi0a, pi1a=pi1a, alpha=alpha, beta=beta,
                    Nmax=N2-Z2, Z1=Z1, Z2=Z2)
  res<-list(param=param, desAD=desAD)
  return(res)
}
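An end-to-end sketch under assumed values: a planned design with N1 = 13, N2 = 43 and boundaries (3, 12), where Z1 = 2 unevaluable patients are observed at stage 1; AC = NULL selects the exponential adaptation, and PdV, tau.fct, binom_alpha and binom_beta must already be loaded.

ad<-Adapt(Z1=2, N1=13, N2=43, pi0=0.2, pi1=0.4, alpha=0.05, beta=0.2,
          t0=1, bound=c(3, 12), error_fct="phi1")
ad$param   # adapted pi0*, pi1* and the numbers of unevaluable patients
ad$desAD   # adapted quadruplet with its final type I and type II error rates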