library(shiny)
#### Call power functions
#### Source code for power calculator
# Power of a two-sided z test for a difference in means between two
# equally sized arms (N subjects in total, N/2 per arm).
#
# mu_t, mu_c  treatment / control means under the alternative
# sigma       common standard deviation
# alpha       two-sided significance level
# N           total sample size across both arms
#
# Returns the statistical power (rejection probability), a number in [0, 1].
power_calculator <- function(mu_t, mu_c, sigma, alpha=0.05, N){
  # Standardized effect: |difference| over the SE of the difference in
  # means with N/2 per arm, i.e. SE = 2 * sigma / sqrt(N).
  effect_z <- (abs(mu_t - mu_c) * sqrt(N)) / (2 * sigma)
  crit <- qnorm(1 - alpha / 2)
  # Power = P(Z > crit - effect) + P(Z < -crit - effect)
  pnorm(effect_z - crit) + pnorm(-effect_z - crit)
}
#power_calculator(mu_t = 60, mu_c = 50, sigma = 20,N = 100)
#power.t.test(n = 50, delta = 10, sd = 20)
# Power of a two-sided z test on the difference between two proportions,
# with N subjects split evenly across the two arms.
#
# p1, p0  treatment / control proportions under the alternative
# alpha   two-sided significance level
# N       total sample size across both arms
#
# Returns the statistical power (rejection probability), a number in [0, 1].
power_calculator_binary <- function(p1, p0, alpha=0.05, N){
  # SE of the difference with N/2 per arm is
  # sqrt((p1(1-p1) + p0(1-p0)) / (N/2)).
  pooled_sd <- sqrt(p1 * (1 - p1) + p0 * (1 - p0))
  effect_z <- abs(p1 - p0) * sqrt(N / 2) / pooled_sd
  crit <- qnorm(1 - alpha / 2)
  # Two-sided rejection probability under the alternative.
  pnorm(effect_z - crit) + pnorm(-effect_z - crit)
}
#power_calculator_binary(p1=.65, p0=.50, N=100)
#power.prop.test(p1 = .65, p2 = .50, n = 50)
# Power for a cluster-randomized design with a continuous outcome.
#
# mu_t, mu_c      treatment / control means under the alternative
# sigma           common standard deviation
# ICC             intra-cluster correlation coefficient
# alpha           two-sided significance level
# n_clus_per_arm  number of clusters per arm
# N               total sample size across both arms
#
# Returns the statistical power, or NA when N is too small to put at
# least one subject in every cluster.
power_calculator_cluster <- function(mu_t, mu_c, sigma, ICC, alpha=0.05, n_clus_per_arm, N){
  # Individuals per cluster, N split over 2 * n_clus_per_arm clusters.
  clus_size <- N / (n_clus_per_arm * 2)
  if (clus_size < 1) {
    # Infeasible design: fewer than one subject per cluster.
    return(NA)
  }
  # Design effect 1 + (m - 1) * ICC inflates the outcome variance;
  # the (k - 1) factor reflects clusters-per-arm degrees of freedom.
  design_effect <- 1 + (clus_size - 1) * ICC
  effect_z <- abs(mu_t - mu_c) * sqrt((n_clus_per_arm - 1) * clus_size) /
    sqrt(2 * sigma^2 * design_effect)
  crit <- qnorm(1 - alpha / 2)
  pnorm(effect_z - crit) + pnorm(-effect_z - crit)
}
# Power for a cluster-randomized design with a binary outcome.
#
# p1, p0          treatment / control proportions under the alternative
# ICC             intra-cluster correlation coefficient
# alpha           two-sided significance level
# n_clus_per_arm  number of clusters per arm
# N               total sample size across both arms
#
# Returns the statistical power, or NA when N is too small to put at
# least one subject in every cluster.
power_calculator_binary_cluster <- function(p1, p0, ICC, alpha=0.05, n_clus_per_arm, N){
  # Individuals per cluster, N split over 2 * n_clus_per_arm clusters.
  clus_size <- N / (n_clus_per_arm * 2)
  if (clus_size < 1) {
    # Infeasible design: fewer than one subject per cluster.
    return(NA)
  }
  # Binomial variance from both arms, inflated by the design effect
  # 1 + (m - 1) * ICC for clustering.
  variance <- (p1 * (1 - p1) + p0 * (1 - p0)) * (1 + (clus_size - 1) * ICC)
  effect_z <- abs(p1 - p0) * sqrt((n_clus_per_arm - 1) * clus_size) /
    sqrt(variance)
  crit <- qnorm(1 - alpha / 2)
  pnorm(effect_z - crit) + pnorm(-effect_z - crit)
}
####################
ui <- fluidPage(
includeScript("../../../Matomo-tquant.js"),
titlePanel("Check your stats!"),
mainPanel(
tabsetPanel(
tabPanel("Introduction",
########################
p("With this app, you will get a personal advice on how to improve your study according to your data and methodological approach. Answer all questions below in order to know how to control for possible flaws in your research, e.g. p-hacking.
Don't worry if your answers are not the best ones because you will get guidance for the appropriate way of proceeding!") #intro text here
),
tabPanel("What analysis to use?",
####################
##Root
radioButtons("main", label = h4("Is your independent variable continuous or discrete?"),
choices = list("Continuous" = 1, "Discrete" = 2), selected = character(0)),
tags$hr(),
#Branch 1
conditionalPanel(
condition = "input.main == 1",
radioButtons("b1", label = h4("Is your dependent variable continuous or discrete?"),
choices = list("Continuous" = 1, "Discrete" = 2), selected = character(0)
),
tags$hr(),
conditionalPanel(
#Branch 1-1
condition = "input.b1 == 1",
radioButtons("b1_1", label = h4("How many dependent variables do you assess?"),
choices = list("One" = 1, "More than one" = 2), selected = character(0)
),
tags$hr(),
conditionalPanel(
#Branch 1-1-1
condition = "input.b1_1 == 1",
radioButtons("b1_1_1", label = h4("How many independent variables do you assess?"),
choices = list("One" = 1, "More than one" = 2), selected = character(0)
),
tags$hr(),
conditionalPanel(
#Branch 1-1-1-1
condition = "input.b1_1_1 == 1",
textOutput("b1_1_1_1"),
h4("We suggest using Univariate Regression."),
p("- You assume that there is a linear relationship between your dependent and independent variable(s)
You can best test this by making a scatter plot"),
p("- You assume that your data come from a normal distribution
You can test normality by using the Shapiro-Wilk test. The result should not be significant at an alpha level of 0.05."),
p("- In the case of multiple dependent variables you assume absence of multicollinearity
This means that the dependent variables cannot be correlated to each other. Tabachnick & Fidell (2012) suggest that no correlation should be above r = .90. You can also check this by computing the Variance Inflation Function which should not be bigger than 10."),
p("- You assume homogeneity of variance. This means that you assume that the variance between the groups is equal. You can test this by using the Levene???s Test of Equality of Variance. The result should not be significant at an alpha level of 0.05."),
tags$div(id = 'reg')
),
conditionalPanel(
#Branch 1-1-1-2
condition = "input.b1_1_1 == 2",
textOutput("b1_1_1_2"),
h4("We suggest using Multiple Regression."),
p("- You assume that there is a linear relationship between your dependent and independent variable(s)
You can best test this by making a scatter plot"),
p("- You assume that your data come from a normal distribution
You can test normality by using the Shapiro-Wilk test. The result should not be significant at an alpha level of 0.05."),
p("- In the case of multiple dependent variables you assume absence of multicollinearity
This means that the dependent variables cannot be correlated to each other. Tabachnick & Fidell (2012) suggest that no correlation should be above r = .90. You can also check this by computing the Variance Inflation Function which should not be bigger than 10."),
p("- You assume homogeneity of variance. This means that you assume that the variance between the groups is equal. You can test this by using the Levene???s Test of Equality of Variance. The result should not be significant at an alpha level of 0.05."),
tags$div(id = 'reg')
)
),
conditionalPanel(
#Branch 1-1-2
condition = "input.b1_1 == 2",
radioButtons("b1_1_2", label = h4("How many independent variables do you assess?"),
choices = list("One" = 1, "More than one" = 2), selected = character(0)
),
tags$hr(),
conditionalPanel(
#Branch 1-1-2-1
condition = "input.b1_1_2 == 1",
h4("We suggest using Multivariate Regression."),
p("- You assume that there is a linear relationship between your dependent and independent variable(s)
You can best test this by making a scatter plot"),
p("- You assume that your data come from a normal distribution
You can test normality by using the Shapiro-Wilk test. The result should not be significant at an alpha level of 0.05."),
p("- In the case of multiple dependent variables you assume absence of multicollinearity
This means that the dependent variables cannot be correlated to each other. Tabachnick & Fidell (2012) suggest that no correlation should be above r = .90. You can also check this by computing the Variance Inflation Function which should not be bigger than 10."),
p("- You assume homogeneity of variance. This means that you assume that the variance between the groups is equal. You can test this by using the Levene???s Test of Equality of Variance. The result should not be significant at an alpha level of 0.05."),
tags$div(id = 'reg')
),
conditionalPanel(
#Branch 1-1-2-2
condition = "input.b1_1_2 == 2",
h4("We suggest using Multivariate Multiple Regression."),
p("- You assume that there is a linear relationship between your dependent and independent variable(s)
You can best test this by making a scatter plot"),
p("- You assume that your data come from a normal distribution
You can test normality by using the Shapiro-Wilk test. The result should not be significant at an alpha level of 0.05."),
p("- In the case of multiple dependent variables you assume absence of multicollinearity
This means that the dependent variables cannot be correlated to each other. Tabachnick & Fidell (2012) suggest that no correlation should be above r = .90. You can also check this by computing the Variance Inflation Function which should not be bigger than 10."),
p("- You assume homogeneity of variance. This means that you assume that the variance between the groups is equal. You can test this by using the Levene???s Test of Equality of Variance. The result should not be significant at an alpha level of 0.05."),
tags$div(id = 'reg')
)
)
),
conditionalPanel(
#Branch 1-2
condition = "input.b1 == 2",
radioButtons("b1_2", label = h4("How many dependent variables do you assess?"),
choices = list("One" = 1, "More than one" = 2), selected = character(0)
),
tags$hr(),
conditionalPanel(
#Branch 1-2-1
condition = "input.b1_2 == 1",
radioButtons("b1_2_1", label = h4("How many independent variables do you assess?"),
choices = list("One" = 1, "More than one" = 2), selected = character(0)
),
tags$hr(),
conditionalPanel(
#Branch 1-2-1-1
condition = "input.b1_2_1 == 1",
h4("We suggest using Logistic Regression."),
p("- You assume that there is a linear relationship between your dependent and independent variable(s)
You can best test this by making a scatter plot"),
p("- You assume that your data come from a normal distribution
You can test normality by using the Shapiro-Wilk test. The result should not be significant at an alpha level of 0.05."),
p("- In the case of multiple dependent variables you assume absence of multicollinearity
This means that the dependent variables cannot be correlated to each other. Tabachnick & Fidell (2012) suggest that no correlation should be above r = .90. You can also check this by computing the Variance Inflation Function which should not be bigger than 10."),
p("- You assume homogeneity of variance. This means that you assume that the variance between the groups is equal. You can test this by using the Levene???s Test of Equality of Variance. The result should not be significant at an alpha level of 0.05."),
tags$div(id = 'reg')
),
conditionalPanel(
#Branch 1-2-1-2
condition = "input.b1_2_1 == 2",
h4("We suggest using Multiple Logistic Regression."),
p("- You assume that there is a linear relationship between your dependent and independent variable(s)
You can best test this by making a scatter plot"),
p("- You assume that your data come from a normal distribution
You can test normality by using the Shapiro-Wilk test. The result should not be significant at an alpha level of 0.05."),
p("- In the case of multiple dependent variables you assume absence of multicollinearity
This means that the dependent variables cannot be correlated to each other. Tabachnick & Fidell (2012) suggest that no correlation should be above r = .90. You can also check this by computing the Variance Inflation Function which should not be bigger than 10."),
p("- You assume homogeneity of variance. This means that you assume that the variance between the groups is equal. You can test this by using the Levene???s Test of Equality of Variance. The result should not be significant at an alpha level of 0.05."),
tags$div(id = 'reg')
)
),
conditionalPanel(
#Branch 1-2-2
condition = "input.b1_2 == 2",
radioButtons("b1_2_2", label = h4("How many independent variables do you assess?"),
choices = list("One" = 1, "More than one" = 2), selected = character(0)
),
tags$hr(),
conditionalPanel(
#Branch 1-2-2-1
condition = "input.b1_2_2 == 1",
h4("We suggest using Multivariate Logistic Regression."),
p("- You assume that there is a linear relationship between your dependent and independent variable(s)
You can best test this by making a scatter plot"),
p("- You assume that your data come from a normal distribution
You can test normality by using the Shapiro-Wilk test. The result should not be significant at an alpha level of 0.05."),
p("- In the case of multiple dependent variables you assume absence of multicollinearity
This means that the dependent variables cannot be correlated to each other. Tabachnick & Fidell (2012) suggest that no correlation should be above r = .90. You can also check this by computing the Variance Inflation Function which should not be bigger than 10."),
p("- You assume homogeneity of variance. This means that you assume that the variance between the groups is equal. You can test this by using the Levene???s Test of Equality of Variance. The result should not be significant at an alpha level of 0.05."),
tags$div(id = 'reg')
),
conditionalPanel(
#Branch 1-2-2-2
condition = "input.b1_2_2 == 2",
h4("We suggest using Multivariate Logistic Regression."),
p("- You assume that there is a linear relationship between your dependent and independent variable(s)
You can best test this by making a scatter plot"),
p("- You assume that your data come from a normal distribution
You can test normality by using the Shapiro-Wilk test. The result should not be significant at an alpha level of 0.05."),
p("- In the case of multiple dependent variables you assume absence of multicollinearity
This means that the dependent variables cannot be correlated to each other. Tabachnick & Fidell (2012) suggest that no correlation should be above r = .90. You can also check this by computing the Variance Inflation Function which should not be bigger than 10."),
p("- You assume homogeneity of variance. This means that you assume that the variance between the groups is equal. You can test this by using the Levene???s Test of Equality of Variance. The result should not be significant at an alpha level of 0.05."),
tags$div(id = 'reg')
)
)
)
),
#Branch 2
conditionalPanel(
condition = "input.main == 2",
radioButtons("b2", label = h4("How many levels does your independent variable have?"),
choices = list("Two" = 1, "More than two" = 2), selected = character(0)
),
tags$hr(),
conditionalPanel(
#Branch 21
condition = "input.b2 == 1",
radioButtons("b2_1", label = h4("Is your dependent variable continuous or discrete?"),
choices = list("Continuous" = 1, "Discrete" = 2), selected = character(0)
),
tags$hr(),
conditionalPanel(
#Branch 211
condition = "input.b2_1 == 1",
radioButtons("b2_1_1", label = h4("How many dependent variables do you assess?"),
choices = list("One" = 1, "More than one" = 2), selected = character(0)
),
conditionalPanel(
#B2111
condition = "input.b2_1_1 == 1",
radioButtons("b2_1_1_1", label = h4("How many independent variables do you assess?"),
choices = list("One" = 1, "More than one" = 2), selected = character(0)),
conditionalPanel(
#B21111
condition = "input.b2_1_1_1 == 1",
radioButtons("b2_1_1_1_1", label = h4("Do you plan to include covariants into your analysis?"),
choices = list("Yes" = 1, "No" = 2), selected = character(0)),
tags$hr(),
conditionalPanel(
condition = "input.b2_1_1_1_1 == 1",
h4("We suggest using ANCOVA."),
p("You assume that your data come from a normal distribution
You can test univariate normality by using the Shapiro-Wilk test. The result should not be significant at an alpha level of 0.05."),
p("You assume homogeneity of variance. This means that you assume that the variance between the groups is equal. You can test this first by using the Levene???s Test of Equality of Variance. The result should not be significant at an alpha level of 0.05."),
p("You assume that the sample cases are independent from each other."),
p("You assume independence of the independent variable and covariate
You can test this with a T-test (for 2 conditions) or an ANOVA ( >2 conditions) using the covariate as the dependent variable. The result should not be significant at an alpha level of 0.05.")
),
conditionalPanel(
condition = "input.b2_1_1_1_1 == 2",
h4("We suggest using T-Test."),
p("- You assume that the data of your dependent variable come from a normal distribution.
You can test univariate normality by using the Shapiro-Wilk test. The result should not be significant at an alpha level of 0.05.")
)
),
#ANCOVA/T-Test
conditionalPanel(
#B21112
condition = "input.b2_1_1_1 == 2",
radioButtons("b2_1_1_1_2", label = h4("Do you plan to include covariants into your analysis?"),
choices = list("Yes" = 1, "No" = 2), selected = character(0)),
tags$hr(),
conditionalPanel(
condition = "input.b2_1_1_1_2 == 1",
h4("We suggest using ANCOVA."),
p("You assume that your data come from a normal distribution
You can test univariate normality by using the Shapiro-Wilk test. The result should not be significant at an alpha level of 0.05."),
p("You assume homogeneity of variance. This means that you assume that the variance between the groups is equal. You can test this first by using the Levene???s Test of Equality of Variance. The result should not be significant at an alpha level of 0.05."),
p("You assume that the sample cases are independent from each other."),
p("You assume independence of the independent variable and covariate
You can test this with a T-test (for 2 conditions) or an ANOVA ( >2 conditions) using the covariate as the dependent variable. The result should not be significant at an alpha level of 0.05.")
),
conditionalPanel(
condition = "input.b2_1_1_1_2 == 2",
h4("We suggest using X-way ANOVA."),
p("- You assume that your data come from a normal distribution
You can test univariate normality by using the Shapiro-Wilk test. The result should not be significant at an alpha level of 0.05."),
p("- You assume homogeneity of variance. This means that you assume that the variance between the groups is equal. You can test this by using the Levene???s Test of Equality of Variance. The result should not be significant at an alpha level of 0.05."),
p("- You assume that the sample cases are independent from each other.
")
)
)
#ANCOVA/X-way ANOVA
),
conditionalPanel(
#B2112
condition = "input.b2_1_1 == 2",
radioButtons("b2_1_1_2", label = h4("How many independent variables do you assess?"),
choices = list("One"=1, "More than one" = 2), selected = character(0)),
tags$hr(),
conditionalPanel(
#B21121
condition = "input.b2_1_1_2 == 1",
radioButtons("b2_1_1_2_1", label = h4("Do you plan to include covariants into your analysis?"),
choices = list("Yes" = 1, "No" = 2), selected = character(0)),
tags$hr(),
conditionalPanel(
condition = "input.b2_1_1_2_1 == 1",
h4("We suggest using MANCOVA."),
p("- You assume that your data come from a multivariate normal distribution
First, you can test univariate normality by using the Shapiro-Wilk test. The result should not be significant at an alpha level of 0.05.
For testing multivariate normality we advise the MVN package in R"),
p(" - You assume multivariate homogeneity of variance. This means that you assume that the variance between the groups is equal. You can test this first by using the Levene???s Test of Equality of Variance. The result should not be significant at an alpha level of 0.05.
Then you can test for multivariate homogeneity of variance by using the Box??s M Test. The result should not be significant at an alpha level of 0.001."),
p("- You assume absence of multicolliniarity. This means that the dependent variables cannot be correlated to each other. Tabachnick & Fidell (2012) suggest that no correlation should be above r = .90. You can also check this by computing the Variance Inflation Function which should not be bigger than 10."
)
),
conditionalPanel(
condition = "input.b2_1_1_2_1 == 2",
h4("We suggest using MANCOVA."),
p("- You assume that your data come from a multivariate normal distribution
First, you can test univariate normality by using the Shapiro-Wilk test. The result should not be significant at an alpha level of 0.05.
For testing multivariate normality we advise the MVN package in R"),
p(" - You assume multivariate homogeneity of variance. This means that you assume that the variance between the groups is equal. You can test this first by using the Levene???s Test of Equality of Variance. The result should not be significant at an alpha level of 0.05.
Then you can test for multivariate homogeneity of variance by using the Box??s M Test. The result should not be significant at an alpha level of 0.001."),
p("- You assume absence of multicolliniarity. This means that the dependent variables cannot be correlated to each other. Tabachnick & Fidell (2012) suggest that no correlation should be above r = .90. You can also check this by computing the Variance Inflation Function which should not be bigger than 10."
)
)
),
#MANCOVA/Univariate MANOVA
conditionalPanel(
#B21122
condition = "input.b2_1_1_2 == 2",
radioButtons("b2_1_1_2_2", label = h4("Do you plan to include covariants into your analysis?"),
choices = list("Yes" = 1, "No" = 2), selected = character(0)),
tags$hr(),
conditionalPanel(
condition = "input.b2_1_1_2_2 == 1",
h4("We suggest using MANCOVA."),
p("- You assume that your data come from a multivariate normal distribution
First, you can test univariate normality by using the Shapiro-Wilk test. The result should not be significant at an alpha level of 0.05.
For testing multivariate normality we advise the MVN package in R"),
p(" - You assume multivariate homogeneity of variance. This means that you assume that the variance between the groups is equal. You can test this first by using the Levene???s Test of Equality of Variance. The result should not be significant at an alpha level of 0.05.
Then you can test for multivariate homogeneity of variance by using the Box??s M Test. The result should not be significant at an alpha level of 0.001."),
p("- You assume absence of multicolliniarity. This means that the dependent variables cannot be correlated to each other. Tabachnick & Fidell (2012) suggest that no correlation should be above r = .90. You can also check this by computing the Variance Inflation Function which should not be bigger than 10."
)
),
conditionalPanel(
condition = "input.b2_1_1_2_2 == 2",
h4("We suggest using Multivariate MANOVA."),
p("You assume that your data come from a multivariate normal distribution
First, you can test univariate normality by using the Shapiro-Wilk test. The result should not be significant at an alpha level of 0.05.
For testing multivariate normality we advise the MVN package in R"),
p("You assume multivariate homogeneity of variance. This means that you assume that the variance between the groups is equal. You can test this first by using the Levene???s Test of Equality of Variance. The result should not be significant at an alpha level of 0.05.
Then you can test for multivariate homogeneity of variance by using the Box???s M Test. The result should not be significant at an alpha level of 0.001."),
p("You assume absence of multicolliniarity. This means that the dependent variables cannot be correlated to each other. Tabachnick & Fidell (2012) suggest that no correlation should be above r = .90. You can also check this by computing the Variance Inflation Function which should not be bigger than 10.")
)
#Multivariate MANCOVA/Multivariate MANOVA
)
)
),
conditionalPanel(
#Branch 212
condition = "input.b2_1 == 2",
radioButtons("b2_1_2", label = h4("How many dependent variables do you assess?"),
choices = list("One" = 1, "More than one" = 2), selected = character(0)
),
conditionalPanel(
#B2121
condition = "input.b2_1_2 == 1",
radioButtons("b2_1_2_1", label = h4("How many independent variables do you assess?"),
choices = list("One" = 1, "More than one" = 2), selected = character(0)),
tags$hr(),
conditionalPanel(
condition = "input.b2_1_2_1 == 1",
h4("We suggest using Chi Square Test."),
p("- Your sample size should be big enough, this is especially important for the chi-square test. In no more than 20% of the contingency table the expected counts can be less than 5, and in every cell there should be at least one expected observation.")
),
conditionalPanel(
condition = "input.b2_1_2_1 == 2",
h4("We suggest using Chi Square Test."),
p("- Your sample size should be big enough, this is especially important for the chi-square test. In no more than 20% of the contingency table the expected counts can be less than 5, and in every cell there should be at least one expected observation.")
)
#Chi-square/Chi-square
),
conditionalPanel(
#B2122
condition = "input.b2_1_2 == 2",
radioButtons("b2_1_2_2", label = h4("How many independent variables do you assess?"),
choices = list("One"=1, "More than one" = 2), selected = character(0)),
tags$hr(),
conditionalPanel(
condition = "input.b2_1_2_2 == 1",
h4("We suggest using Chi Square Test."),
p("- Your sample size should be big enough, this is especially important for the chi-square test. In no more than 20% of the contingency table the expected counts can be less than 5, and in every cell there should be at least one expected observation.")
),
conditionalPanel(
condition = "input.b2_1_2_2 == 2",
h4("We suggest using Chi Square Test."),
p("- Your sample size should be big enough, this is especially important for the chi-square test. In no more than 20% of the contingency table the expected counts can be less than 5, and in every cell there should be at least one expected observation.")
)
#Chi-square/Chi-square
)
)
),
conditionalPanel(
#Branch 22
condition = "input.b2 == 2",
radioButtons("b2_2", label = h4("Is your dependent variable continuous or discrete?"),
choices = list("Continuous" = 1, "Discrete" = 2), selected = character(0)),
conditionalPanel(
#Branch 221
condition = "input.b2_2 == 1",
radioButtons("b2_2_1", label = h4("How many dependent variables do you assess?"),
choices = list("One" = 1, "More than one" = 2), selected = character(0)),
conditionalPanel(
#B2211
condition = "input.b2_2_1 == 1",
radioButtons("b2_2_1_1", label = h4("How many independent variables do you assess?"),
choices = list("One" = 1, "More than one" = 2), selected = character(0)),
conditionalPanel(
#B22111
condition = "input.b2_2_1_1 == 1",
radioButtons("b2_2_1_1_1", label = h4("Do you plan to include covariants into your analysis?"),
choices = list("Yes" = 1, "No" = 2), selected = character(0)),
tags$hr(),
conditionalPanel(
condition = "input.b2_2_1_1_1 == 1",
h4("We suggest using ANCOVA."),
p("You assume that your data come from a normal distribution
You can test univariate normality by using the Shapiro-Wilk test. The result should not be significant at an alpha level of 0.05."),
p("You assume homogeneity of variance. This means that you assume that the variance between the groups is equal. You can test this first by using the Levene???s Test of Equality of Variance. The result should not be significant at an alpha level of 0.05."),
p("You assume that the sample cases are independent from each other."),
p("You assume independence of the independent variable and covariate
You can test this with a T-test (for 2 conditions) or an ANOVA ( >2 conditions) using the covariate as the dependent variable. The result should not be significant at an alpha level of 0.05.")
),
conditionalPanel(
condition = "input.b2_2_1_1_1 == 2",
h4("We suggest using Univariate ANOVA."),
p("- You assume that your data come from a normal distribution
You can test univariate normality by using the Shapiro-Wilk test. The result should not be significant at an alpha level of 0.05."),
p("- You assume homogeneity of variance. This means that you assume that the variance between the groups is equal. You can test this by using the Levene???s Test of Equality of Variance. The result should not be significant at an alpha level of 0.05."),
p("- You assume that the sample cases are independent from each other.
")
)
),
#ANCOVA/Univariate ANOVA
conditionalPanel(
#B22112
condition = "input.b2_2_1_1 == 2",
radioButtons("b2_2_1_1_2", label = h4("Do you plan to include covariants into your analysis?"),
choices = list("Yes" = 1, "No" = 2), selected = character(0)),
tags$hr(),
conditionalPanel(
condition = "input.b2_2_1_1_2 == 1",
h4("We suggest using ANCOVA."),
p("You assume that your data come from a normal distribution
You can test univariate normality by using the Shapiro-Wilk test. The result should not be significant at an alpha level of 0.05."),
p("You assume homogeneity of variance. This means that you assume that the variance between the groups is equal. You can test this first by using the Levene???s Test of Equality of Variance. The result should not be significant at an alpha level of 0.05."),
p("You assume that the sample cases are independent from each other."),
p("You assume independence of the independent variable and covariate
You can test this with a T-test (for 2 conditions) or an ANOVA ( >2 conditions) using the covariate as the dependent variable. The result should not be significant at an alpha level of 0.05.")
),
conditionalPanel(
condition = "input.b2_2_1_1_2 == 2",
h4("We suggest using X-way ANOVA."),
p("- You assume that your data come from a normal distribution
You can test univariate normality by using the Shapiro-Wilk test. The result should not be significant at an alpha level of 0.05."),
p("- You assume homogeneity of variance. This means that you assume that the variance between the groups is equal. You can test this by using the Levene???s Test of Equality of Variance. The result should not be significant at an alpha level of 0.05."),
p("- You assume that the sample cases are independent from each other.
")
)
)
#ANCOVA/X-way ANOVA
),
conditionalPanel(
#B2212
condition = "input.b2_2_1 == 2",
radioButtons("b2_2_1_2", label = h4("How many independent variables do you assess?"),
choices = list("One"=1, "More than one" = 2), selected = character(0)),
conditionalPanel(
#B22121
condition = "input.b2_2_1_2 == 1",
radioButtons("b2_2_1_2_1", label = h4("Do you plan to include covariants into your analysis?"),
choices = list("Yes" = 1, "No" = 2), selected = character(0)),
tags$hr(),
conditionalPanel(
condition = "input.b2_2_1_2_1 == 1",
h4("We suggest using MANCOVA."),
p("- You assume that your data come from a multivariate normal distribution
First, you can test univariate normality by using the Shapiro-Wilk test. The result should not be significant at an alpha level of 0.05.
For testing multivariate normality we advise the MVN package in R"),
p(" - You assume multivariate homogeneity of variance. This means that you assume that the variance between the groups is equal. You can test this first by using the Levene???s Test of Equality of Variance. The result should not be significant at an alpha level of 0.05.
Then you can test for multivariate homogeneity of variance by using the Box??s M Test. The result should not be significant at an alpha level of 0.001."),
p("- You assume absence of multicolliniarity. This means that the dependent variables cannot be correlated to each other. Tabachnick & Fidell (2012) suggest that no correlation should be above r = .90. You can also check this by computing the Variance Inflation Function which should not be bigger than 10."
)
),
conditionalPanel(
condition = "input.b2_2_1_2_1 == 2",
h4("We suggest using Multivariate ANOVA."),
p("- You assume that your data come from a normal distribution
You can test univariate normality by using the Shapiro-Wilk test. The result should not be significant at an alpha level of 0.05."),
p("- You assume homogeneity of variance. This means that you assume that the variance between the groups is equal. You can test this by using the Levene???s Test of Equality of Variance. The result should not be significant at an alpha level of 0.05."),
p("- You assume that the sample cases are independent from each other.
")
)
),
#MANCOVA/Multivariate ANOVA
conditionalPanel(
#B22122
condition = "input.b2_2_1_2 == 2",
radioButtons("b2_2_1_2_2", label = h4("Do you plan to include covariants into your analysis?"),
choices = list("Yes" = 1, "No" = 2), selected = character(0)),
tags$hr(),
conditionalPanel(
condition = "input.b2_2_1_2_2 == 1",
h4("We suggest using MANCOVA."),
p("- You assume that your data come from a multivariate normal distribution
First, you can test univariate normality by using the Shapiro-Wilk test. The result should not be significant at an alpha level of 0.05.
For testing multivariate normality we advise the MVN package in R"),
p(" - You assume multivariate homogeneity of variance. This means that you assume that the variance between the groups is equal. You can test this first by using the Levene???s Test of Equality of Variance. The result should not be significant at an alpha level of 0.05.
Then you can test for multivariate homogeneity of variance by using the Box??s M Test. The result should not be significant at an alpha level of 0.001."),
p("- You assume absence of multicolliniarity. This means that the dependent variables cannot be correlated to each other. Tabachnick & Fidell (2012) suggest that no correlation should be above r = .90. You can also check this by computing the Variance Inflation Function which should not be bigger than 10."
)
),
conditionalPanel(
condition = "input.b2_2_1_2_2 == 2",
h4("We suggest using X-way MANOVA."),
p("You assume that your data come from a multivariate normal distribution
First, you can test univariate normality by using the Shapiro-Wilk test. The result should not be significant at an alpha level of 0.05.
For testing multivariate normality we advise the MVN package in R"),
p("You assume multivariate homogeneity of variance. This means that you assume that the variance between the groups is equal. You can test this first by using the Levene???s Test of Equality of Variance. The result should not be significant at an alpha level of 0.05.
Then you can test for multivariate homogeneity of variance by using the Box???s M Test. The result should not be significant at an alpha level of 0.001."),
p("You assume absence of multicolliniarity. This means that the dependent variables cannot be correlated to each other. Tabachnick & Fidell (2012) suggest that no correlation should be above r = .90. You can also check this by computing the Variance Inflation Function which should not be bigger than 10.")
)
#MANCOVA/x-way MANOVA
)
)
),
conditionalPanel(
#Branch 222
condition = "input.b2_2 == 2",
radioButtons("b2_2_2", label = h4("How many dependent variables do you assess?"),
choices = list("One" = 1, "More than one" = 2), selected = character(0)),
conditionalPanel(
#B2221
condition = "input.b2_2_2 == 1",
radioButtons("b2_2_2_1", label = h4("How many dependent variables do you assess?"),
choices = list("One" = 1, "More than one" = 2), selected = character(0)),
tags$hr(),
conditionalPanel(
condition = "input.b2_2_2_1 == 1",
h4("We suggest using Chi Square Test."),
p("- Your sample size should be big enough, this is especially important for the chi-square test. In no more than 20% of the contingency table the expected counts can be less than 5, and in every cell there should be at least one expected observation.")
),
conditionalPanel(
condition = "input.b2_2_2_1 == 2",
h4("We suggest using Chi Square Test."),
p("- Your sample size should be big enough, this is especially important for the chi-square test. In no more than 20% of the contingency table the expected counts can be less than 5, and in every cell there should be at least one expected observation.")
)
),
#Chi square test/Chi square test
conditionalPanel(
#B2222
condition = "input.b2_2_2 == 2",
radioButtons("b2_2_2_2", label = h4("How many dependent variables do you assess?"),
choices = list("One" = 1, "More than one" = 2), selected = character(0)),
tags$hr(),
conditionalPanel(
condition = "input.b2_2_2_2 == 1",
h4("We suggest using Chi Square Test."),
p("- Your sample size should be big enough, this is especially important for the chi-square test. In no more than 20% of the contingency table the expected counts can be less than 5, and in every cell there should be at least one expected observation.")
),
conditionalPanel(
condition = "input.b2_2_2_2 == 2",
h4("We suggest using Chi Square Test."),
p("- Your sample size should be big enough, this is especially important for the chi-square test. In no more than 20% of the contingency table the expected counts can be less than 5, and in every cell there should be at least one expected observation.")
)
#Chi square test/Chi square test
)
)
)
)
),
####################
tabPanel("Power-Analysis",
fluidRow(
column(12,
wellPanel(
helpText("The next step is to calculate the sample size that is implied by a certain power you want to achieve.
Please check the boxes, if applicable."),
checkboxInput(inputId = "clustered",label = "Clustered Design?", value = FALSE),
checkboxInput(inputId = "binary",label = "Binary Dependent Variable?", value = FALSE)
)),
column(4,
wellPanel(
conditionalPanel(
condition = "input.clustered == false & input.binary == false",
selectInput("alpha", "Significance Level",c("Alpha = 0.01", "Alpha = 0.05", "Alpha = 0.10"),selected="Alpha = 0.05"),
numericInput("tau", "Treatment Effect Size", value = 5, min = 0, max = 40),
numericInput("sigma", "Standard Deviation of Outcome Variable", value = 10, min = 0, max = 30),
sliderInput("target", "Power Target", value = .8, min = 0, max = 1),
numericInput("maxn", "Maximum Number of Subjects", value = 2000, min = 0, max = 10000000)
),
conditionalPanel(
condition = "input.clustered == false & input.binary == true",
selectInput("alpha_b", "Significance Level",c("Alpha = 0.01", "Alpha = 0.05", "Alpha = 0.10"),selected="Alpha = 0.05"),
numericInput("p0_b", "Proportion (DV = 1) in Control Group", value = .5, min = 0, max = 1),
numericInput("p1_b", "Proportion (DV = 1) in Treatment Group", value = .65, min = 0, max = 1),
sliderInput("target_b", "Power Target", value = .8, min = 0, max = 1),
numericInput("maxn_b", "Maximum Number of Subjects", value = 2000, min = 0, max = 10000000)
),
conditionalPanel(
condition = "input.clustered == true & input.binary == false",
selectInput("alpha_c", "Significance Level",c("Alpha = 0.01", "Alpha = 0.05", "Alpha = 0.10"),selected="Alpha = 0.05"),
numericInput("tau_c", "Treatment Effect Size", value = 5, min = 0, max = 40),
numericInput("sigma_c", "Standard Deviation of Outcome Variable", value = 10, min = 0, max = 30),
sliderInput("ICC_c", "Intra-cluster Correlation", value = .5, min = 0, max = 1),
numericInput("n_clus_per_arm_c", "Number of Clusters per Arm", value = 40, min = 0, max = 200),
sliderInput("target_c", "Power Target", value = .8, min = 0, max = 1),
numericInput("maxn_c", "Maximum Number of Subjects", value = 2000, min = 0, max = 10000000)
),
conditionalPanel(
condition = "input.clustered == true & input.binary == true",
selectInput("alpha_bc", "Significance Level",c("Alpha = 0.01", "Alpha = 0.05", "Alpha = 0.10"),selected="Alpha = 0.05"),
numericInput("p0_bc", "Proportion (DV = 1) in Control Group", value = .5, min = 0, max = 1),
numericInput("p1_bc", "Proportion (DV = 1) in Treatment Group", value = .65, min = 0, max = 1),
sliderInput("ICC_bc", "Intra-cluster Correlation", value = .5, min = 0, max = 1),
numericInput("n_clus_per_arm_bc", "Number of Clusters per Arm", value = 40, min = 0, max = 200),
sliderInput("target_bc", "Power Target", value = .8, min = 0, max = 1),
numericInput("maxn_bc", "Maximum Number of Subjects", value = 2000, min = 0, max = 10000000)
)
)),
column(8,
wellPanel(
plotOutput("powerplot")
)
)),
column(12,
wellPanel(
htmlOutput("nrequired")
))
),
tabPanel("About the app",
p(h4("Made by Seongjin Bien, Julian Burger, Gaby Lunansky, Francesca Freuli, Irene Sánchez, Maria Vlachou")),
p(h4("Power Analysis Shiny App made by Alexander Coppock"))
)
)
)
)
######################
## source("helpers.R")
# Candidate total sample sizes at which power is evaluated.
# Ns_small: fine grid 1..10,000, used when the power target is reachable with
# at most 10,000 subjects.
Ns_small <- as.matrix(1:10000)
# Ns_big: the same fine grid up to 9,999, then steps of 1,000 up to
# 100,000,000. The coarse part starts at 10,000 (the original started at
# 1,000, which duplicated the 1,000..9,999 region and made the grid
# non-monotonic, so lines() drew backwards over it). Coverage is unchanged.
Ns_big <- as.matrix(c(seq_len(9999), seq(10000, 100000000, 1000)))
# Server logic for the power-analysis tab. Four designs are supported,
# selected by the "clustered" and "binary" checkboxes; each design has its
# own input set, distinguished by suffix (none = simple, _b = binary,
# _c = clustered, _bc = binary + clustered). The power_calculator* functions
# and the Ns_small / Ns_big sample-size grids are defined at the top of this
# file.
# NOTE(review): shinyServer() is deprecated in modern Shiny; a bare
# function(input, output) behaves identically and could replace it.
server <- shinyServer(
  function(input, output) {

    # --- Power curves: one reactive per design ------------------------------
    # Each reactive returns a list with:
    #   betas_small - power at each N of the fine grid Ns_small (1..10,000)
    #   betas_big   - power at each N of the coarse large-N grid Ns_big
    #   big         - TRUE when the coarse grid should be displayed (target
    #                 unreachable within 10,000 subjects, or maxn > 10,000)

    # Individual-level design, continuous outcome.
    betas_fun <- reactive({
      sigma <- input$sigma
      tau <- input$tau
      target <- input$target
      maxn <- input$maxn
      # Translate the UI label into a numeric significance level.
      alpha <- switch(input$alpha,
                      "Alpha = 0.01" = 0.01,
                      "Alpha = 0.05" = 0.05,
                      "Alpha = 0.10" = 0.10)
      # The control-group mean (60) is arbitrary: power depends only on the
      # difference mu_t - mu_c, i.e. on tau.
      betas_small <- apply(X = Ns_small, MARGIN = 1, FUN = power_calculator,
                           mu_t = (60 + tau), mu_c = 60, sigma = sigma, alpha = alpha)
      betas_big <- apply(X = Ns_big, MARGIN = 1, FUN = power_calculator,
                         mu_t = (60 + tau), mu_c = 60, sigma = sigma, alpha = alpha)
      big <- (sum(betas_small >= target, na.rm = TRUE) == 0 | maxn > 10000)
      return(list(betas_small = betas_small, betas_big = betas_big, big = big))
    })

    # Individual-level design, binary outcome.
    betas_fun_binary <- reactive({
      p0 <- input$p0_b
      p1 <- input$p1_b
      target <- input$target_b
      maxn <- input$maxn_b
      alpha <- switch(input$alpha_b,
                      "Alpha = 0.01" = 0.01,
                      "Alpha = 0.05" = 0.05,
                      "Alpha = 0.10" = 0.10)
      betas_small <- apply(X = Ns_small, MARGIN = 1, FUN = power_calculator_binary,
                           p1 = p1, p0 = p0, alpha = alpha)
      betas_big <- apply(X = Ns_big, MARGIN = 1, FUN = power_calculator_binary,
                         p1 = p1, p0 = p0, alpha = alpha)
      big <- (sum(betas_small >= target, na.rm = TRUE) == 0 | maxn > 10000)
      return(list(betas_small = betas_small, betas_big = betas_big, big = big))
    })

    # Clustered design, continuous outcome.
    betas_fun_clus <- reactive({
      sigma <- input$sigma_c
      ICC <- input$ICC_c
      tau <- input$tau_c
      n_clus_per_arm <- input$n_clus_per_arm_c
      target <- input$target_c
      maxn <- input$maxn_c
      alpha <- switch(input$alpha_c,
                      "Alpha = 0.01" = 0.01,
                      "Alpha = 0.05" = 0.05,
                      "Alpha = 0.10" = 0.10)
      # power_calculator_cluster returns NA when N implies < 1 subject per
      # cluster, hence the na.rm = TRUE below.
      betas_small <- apply(X = Ns_small, MARGIN = 1, FUN = power_calculator_cluster,
                           mu_t = (60 + tau), mu_c = 60, sigma = sigma, ICC = ICC,
                           n_clus_per_arm = n_clus_per_arm, alpha = alpha)
      betas_big <- apply(X = Ns_big, MARGIN = 1, FUN = power_calculator_cluster,
                         mu_t = (60 + tau), mu_c = 60, sigma = sigma, ICC = ICC,
                         n_clus_per_arm = n_clus_per_arm, alpha = alpha)
      big <- (sum(betas_small >= target, na.rm = TRUE) == 0 | maxn > 10000)
      return(list(betas_small = betas_small, betas_big = betas_big, big = big))
    })

    # Clustered design, binary outcome.
    betas_fun_binary_clus <- reactive({
      p0 <- input$p0_bc
      p1 <- input$p1_bc
      ICC <- input$ICC_bc
      n_clus_per_arm <- input$n_clus_per_arm_bc
      target <- input$target_bc
      maxn <- input$maxn_bc
      alpha <- switch(input$alpha_bc,
                      "Alpha = 0.01" = 0.01,
                      "Alpha = 0.05" = 0.05,
                      "Alpha = 0.10" = 0.10)
      betas_small <- apply(X = Ns_small, MARGIN = 1, FUN = power_calculator_binary_cluster,
                           p0 = p0, p1 = p1, ICC = ICC,
                           n_clus_per_arm = n_clus_per_arm, alpha = alpha)
      betas_big <- apply(X = Ns_big, MARGIN = 1, FUN = power_calculator_binary_cluster,
                         p0 = p0, p1 = p1, ICC = ICC,
                         n_clus_per_arm = n_clus_per_arm, alpha = alpha)
      big <- (sum(betas_small >= target, na.rm = TRUE) == 0 | maxn > 10000)
      return(list(betas_small = betas_small, betas_big = betas_big, big = big))
    })

    # --- Power plot ---------------------------------------------------------
    # Draws the power curve for the selected design over the fine or coarse
    # grid. Clustered designs additionally overlay the equivalent
    # individual-level curve for comparison. Unused input reads that were in
    # the original branches (e.g. alpha in the non-clustered branches) have
    # been removed; the reactives above already depend on those inputs.
    output$powerplot <- renderPlot({
      if (input$clustered == FALSE & input$binary == FALSE) {
        sigma <- input$sigma
        tau <- input$tau
        target <- input$target
        maxn <- input$maxn
        results <- betas_fun()
        if (!results$big) {
          plot(NA, ylim = c(0, 1), xlim = c(0, maxn),
               main = paste0("Power Analysis: Hypothetical Treatment Effect = ", tau, "\nSD of outcome = ", sigma),
               ylab = "Power (Probability of Statistical Significance)", xlab = "Number of Subjects")
          lines(Ns_small, results$betas_small, lwd = 4, col = "blue")
          abline(h = target, col = "red", lty = 2)
        }
        if (results$big) {
          plot(NA, ylim = c(0, 1), xlim = c(0, maxn),
               main = paste0("Power Analysis: Hypothetical Treatment Effect = ", tau, "\nSD of outcome = ", sigma),
               ylab = "Power (Probability of Statistical Significance)", xlab = "Number of Subjects")
          lines(Ns_big, results$betas_big, lwd = 4, col = "blue")
          abline(h = target, col = "red", lty = 2)
        }
      }
      if (input$clustered == TRUE & input$binary == FALSE) {
        sigma <- input$sigma_c
        ICC <- input$ICC_c
        tau <- input$tau_c
        target <- input$target_c
        alpha <- switch(input$alpha_c,
                        "Alpha = 0.01" = 0.01,
                        "Alpha = 0.05" = 0.05,
                        "Alpha = 0.10" = 0.10)
        maxn <- input$maxn_c
        results <- betas_fun_clus()
        if (!results$big) {
          plot(NA, ylim = c(0, 1), xlim = c(0, maxn),
               main = paste0("Power Analysis: Hypothetical Treatment Effect = ", tau, "\nSD of outcome = ", sigma, "; ICC = ", ICC),
               ylab = "Power (Probability of Statistical Significance)", xlab = "Number of Subjects")
          lines(Ns_small, results$betas_small, lwd = 4, col = "green")
          # Overlay: power of a non-clustered design with the same effect size.
          lines(Ns_small, apply(X = Ns_small, MARGIN = 1, FUN = power_calculator,
                                mu_t = (60 + tau), mu_c = 60, sigma = sigma, alpha = alpha),
                lwd = 4, col = "blue")
          abline(h = target, col = "red", lty = 2)
          legend("bottomright", legend = c("Equivalent Individual-Level Design", "Clustered Design"),
                 col = c("blue", "green"), lwd = c(4, 4), lty = c(1, 1))
        }
        if (results$big) {
          plot(NA, ylim = c(0, 1), xlim = c(0, maxn),
               main = paste0("Power Analysis: Hypothetical Treatment Effect = ", tau, "\nSD of outcome = ", sigma, "; ICC = ", ICC),
               ylab = "Power (Probability of Statistical Significance)", xlab = "Number of Subjects")
          lines(Ns_big, results$betas_big, lwd = 4, col = "green")
          lines(Ns_big, apply(X = Ns_big, MARGIN = 1, FUN = power_calculator,
                              mu_t = (60 + tau), mu_c = 60, sigma = sigma, alpha = alpha),
                lwd = 4, col = "blue")
          abline(h = target, col = "red", lty = 2)
          legend("bottomright", legend = c("Equivalent Individual-Level Design", "Clustered Design"),
                 col = c("blue", "green"), lwd = c(4, 4), lty = c(1, 1))
        }
      }
      # Binary outcome, individual-level design.
      if (input$clustered == FALSE & input$binary == TRUE) {
        p0 <- input$p0_b
        p1 <- input$p1_b
        target <- input$target_b
        maxn <- input$maxn_b
        results <- betas_fun_binary()
        if (!results$big) {
          plot(NA, ylim = c(0, 1), xlim = c(0, maxn),
               main = paste0("Power Analysis: Hypothetical Treatment Effect = ", round((p1 - p0), 3), " Percentage Points"),
               ylab = "Power (Probability of Statistical Significance)", xlab = "Number of Subjects")
          lines(Ns_small, results$betas_small, lwd = 4, col = "blue")
          abline(h = target, col = "red", lty = 2)
        }
        if (results$big) {
          plot(NA, ylim = c(0, 1), xlim = c(0, maxn),
               main = paste0("Power Analysis: Hypothetical Treatment Effect = ", round((p1 - p0), 3), " Percentage Points"),
               ylab = "Power (Probability of Statistical Significance)", xlab = "Number of Subjects")
          lines(Ns_big, results$betas_big, lwd = 4, col = "blue")
          abline(h = target, col = "red", lty = 2)
        }
      }
      # Binary outcome, clustered design.
      if (input$clustered == TRUE & input$binary == TRUE) {
        p0 <- input$p0_bc
        p1 <- input$p1_bc
        ICC <- input$ICC_bc
        target <- input$target_bc
        alpha <- switch(input$alpha_bc,
                        "Alpha = 0.01" = 0.01,
                        "Alpha = 0.05" = 0.05,
                        "Alpha = 0.10" = 0.10)
        maxn <- input$maxn_bc
        results <- betas_fun_binary_clus()
        if (!results$big) {
          plot(NA, ylim = c(0, 1), xlim = c(0, maxn),
               main = paste0("Power Analysis: Hypothetical Treatment Effect = ", round(abs(p1 - p0), 3), " Percentage Points; \n ICC = ", ICC),
               ylab = "Power (Probability of Statistical Significance)", xlab = "Number of Subjects")
          lines(Ns_small, results$betas_small, lwd = 4, col = "green")
          lines(Ns_small, apply(X = Ns_small, MARGIN = 1, FUN = power_calculator_binary,
                                p1 = p1, p0 = p0, alpha = alpha),
                lwd = 4, col = "blue")
          abline(h = target, col = "red", lty = 2)
          legend("bottomright", legend = c("Equivalent Individual-Level Design", "Clustered Design"),
                 col = c("blue", "green"), lwd = c(4, 4), lty = c(1, 1))
        }
        if (results$big) {
          plot(NA, ylim = c(0, 1), xlim = c(0, maxn),
               main = paste0("Power Analysis: Hypothetical Treatment Effect = ", round(abs(p1 - p0), 3), " Percentage Points; \n ICC = ", ICC),
               ylab = "Power (Probability of Statistical Significance)", xlab = "Number of Subjects")
          lines(Ns_big, results$betas_big, lwd = 4, col = "green")
          lines(Ns_big, apply(X = Ns_big, MARGIN = 1, FUN = power_calculator_binary,
                              p1 = p1, p0 = p0, alpha = alpha),
                lwd = 4, col = "blue")
          abline(h = target, col = "red", lty = 2)
          legend("bottomright", legend = c("Equivalent Individual-Level Design", "Clustered Design"),
                 col = c("blue", "green"), lwd = c(4, 4), lty = c(1, 1))
        }
      }
    })

    # --- Sample-size recommendation -----------------------------------------
    # Finds the smallest N on the active grid whose power reaches the target
    # and renders a one-sentence HTML recommendation. Locals that the original
    # read but never used (sigma, tau, p0, p1, ICC, alpha, maxn) have been
    # removed; the reactives already establish those input dependencies.
    output$nrequired <- renderUI({
      if (input$clustered == FALSE & input$binary == FALSE) {
        target <- input$target
        results <- betas_fun()
        if (!results$big) {
          # which.max() on a logical vector returns the index of the first TRUE.
          nrequired <- Ns_small[which.max(results$betas_small >= target)]
        }
        if (results$big) {
          nrequired <- Ns_big[which.max(results$betas_big >= target)]
        }
        str1 <- paste0("In order to achieve ", target * 100, "% power, you'll need to use a sample size of at least ", nrequired, ".")
        # Even the coarse grid never reaches the target: report failure instead.
        if (sum(results$betas_big >= target, na.rm = TRUE) == 0) {
          str1 <- paste0("In order to achieve ", target * 100, "% power, you'll need to use a sample size of well more than 10,000,000")
        }
      }
      if (input$clustered == TRUE & input$binary == FALSE) {
        n_clus_per_arm <- input$n_clus_per_arm_c
        target <- input$target_c
        results <- betas_fun_clus()
        if (!results$big) {
          nrequired <- Ns_small[which.max(results$betas_small >= target)]
        }
        if (results$big) {
          nrequired <- Ns_big[which.max(results$betas_big >= target)]
        }
        str1 <- paste0("In order to achieve ", target * 100, "% power, you'll need to use a sample size of at least ", nrequired,
                       ", or an average of at least ", round(nrequired / (n_clus_per_arm * 2)),
                       " subjects in each of ", n_clus_per_arm * 2, " clusters. Right-click to download image.")
        if (sum(results$betas_big >= target, na.rm = TRUE) == 0) {
          str1 <- paste0("In order to achieve ", target * 100, "% power, you'll need to use a sample size of well more than 10,000,000. You may need to increase the number of clusters.")
        }
      }
      # Binary outcome, individual-level design.
      if (input$clustered == FALSE & input$binary == TRUE) {
        target <- input$target_b
        results <- betas_fun_binary()
        if (!results$big) {
          nrequired <- Ns_small[which.max(results$betas_small >= target)]
        }
        if (results$big) {
          nrequired <- Ns_big[which.max(results$betas_big >= target)]
        }
        str1 <- paste0("In order to achieve ", target * 100, "% power, you'll need to use a sample size of at least ", nrequired, ".")
        if (sum(results$betas_big >= target, na.rm = TRUE) == 0) {
          str1 <- paste0("In order to achieve ", target * 100, "% power, you'll need to use a sample size of well more than 10,000,000")
        }
      }
      # Binary outcome, clustered design.
      if (input$clustered == TRUE & input$binary == TRUE) {
        n_clus_per_arm <- input$n_clus_per_arm_bc
        target <- input$target_bc
        results <- betas_fun_binary_clus()
        if (!results$big) {
          nrequired <- Ns_small[which.max(results$betas_small >= target)]
        }
        if (results$big) {
          nrequired <- Ns_big[which.max(results$betas_big >= target)]
        }
        # The attribution URL was malformed in the original
        # ("http://www.https://alexandercoppock.com"); fixed here.
        str1 <- paste0("In order to achieve ", target * 100, "% power, you'll need to use a sample size of at least ", nrequired,
                       ", or an average of at least ", round(nrequired / (n_clus_per_arm * 2)),
                       " subjects in each of ", n_clus_per_arm * 2,
                       " clusters. Right-click to download image. This power calculator was designed by Alexander Coppock, https://alexandercoppock.com")
        if (sum(results$betas_big >= target, na.rm = TRUE) == 0) {
          str1 <- paste0("In order to achieve ", target * 100, "% power, you'll need to use a sample size of well more than 10,000,000. You may need to increase the number of clusters. This power calculator was designed by Alexander Coppock, https://alexandercoppock.com")
        }
      }
      HTML(str1)
    })
  }
)
# Launch the application, wiring the UI defined above to the server logic.
shinyApp(ui = ui, server = server)
######################