I currently have matrices returned from OpenMS and EncyclopeDIA. Let's see how similar they are.
I think I will just load an existing expressionset generated from openswath.
## Load the OpenSWATH expressionset from its rda file into a scratch
## environment (so the load does not clobber anything in the session),
## then pull out the 'expt' object and its protein abundance matrix.
osw_env <- new.env()
loaded <- load("protein_expt-v20181112.rda", envir=osw_env)
osw_matrix <- get("expt", envir=osw_env)
osw_counts <- exprs(osw_matrix)
colnames(osw_counts)
## [1] "s2018_0817BrikenTrypsinDIA01" "s2018_0817BrikenTrypsinDIA02"
## [3] "s2018_0817BrikenTrypsinDIA03" "s2018_0817BrikenTrypsinDIA11"
## [5] "s2018_0817BrikenTrypsinDIA12" "s2018_0817BrikenTrypsinDIA13"
## [7] "s2018_0817BrikenTrypsinDIA07" "s2018_0817BrikenTrypsinDIA08"
## [9] "s2018_0817BrikenTrypsinDIA09" "s2018_0817BrikenTrypsinDIA17"
## [11] "s2018_0817BrikenTrypsinDIA18" "s2018_0817BrikenTrypsinDIA19"
## Read the EncyclopeDIA protein-level quantification report. The first
## column is the protein ID; the following two are additional annotation
## columns (presumably peptide counts/sequences -- TODO confirm against
## the report header). Keep the protein IDs as rownames and drop all
## three annotation columns in a single subset rather than three
## successive single-column deletions.
## NOTE(review): .elib.proteins.txt reports are tab-delimited; the default
## whitespace sep works only if no field contains spaces -- confirm.
enc_matrix <- read.table("results/encyclopedia_quant_report.elib.proteins.txt", header=TRUE)
rownames(enc_matrix) <- enc_matrix[["Protein"]]
enc_matrix <- enc_matrix[, -(1:3), drop=FALSE]
colnames(enc_matrix)
## [1] "X2018_0817BrikenTrypsinDIA01.mzML"
## [2] "X2018_0817BrikenTrypsinDIA02.mzML"
## [3] "X2018_0817BrikenTrypsinDIA03.mzML"
## [4] "X2018_0817BrikenTrypsinDIA07.mzML"
## [5] "X2018_0817BrikenTrypsinDIA08.mzML"
## [6] "X2018_0817BrikenTrypsinDIA09.mzML"
## [7] "X2018_0817BrikenTrypsinDIA11.mzML"
## [8] "X2018_0817BrikenTrypsinDIA12.mzML"
## [9] "X2018_0817BrikenTrypsinDIA13.mzML"
## [10] "X2018_0817BrikenTrypsinDIA17.mzML"
## [11] "X2018_0817BrikenTrypsinDIA18.mzML"
## [12] "X2018_0817BrikenTrypsinDIA19.mzML"
## Harmonize the encyclopedia column names with the openswath ones: the
## leading 'X' (prepended because the sample IDs start with a digit)
## becomes 's', and the trailing '.mzML' suffix is dropped. Anchor both
## patterns (^ and $) so only the prefix/suffix are rewritten -- the
## original unanchored patterns would also clobber any interior 'X' or
## '.mzML' occurrences.
colnames(enc_matrix) <- gsub(pattern="^X", replacement="s", x=colnames(enc_matrix))
colnames(enc_matrix) <- gsub(pattern="\\.mzML$", replacement="", x=colnames(enc_matrix))
## Reorder the openswath columns to match the encyclopedia column order.
col_idx <- colnames(enc_matrix)
osw_counts <- osw_counts[, col_idx]
## Join the two matrices on their shared rownames. merge() with
## by="row.names" performs an inner join, so only proteins quantified by
## both tools survive; the key ends up in a 'Row.names' column, which we
## promote back to rownames and then remove by name.
all_counts <- merge(osw_counts, enc_matrix, by="row.names")
rownames(all_counts) <- all_counts[["Row.names"]]
keepers <- colnames(all_counts) != "Row.names"
all_counts <- all_counts[, keepers, drop=FALSE]
colnames(all_counts)
## [1] "s2018_0817BrikenTrypsinDIA01.x" "s2018_0817BrikenTrypsinDIA02.x"
## [3] "s2018_0817BrikenTrypsinDIA03.x" "s2018_0817BrikenTrypsinDIA07.x"
## [5] "s2018_0817BrikenTrypsinDIA08.x" "s2018_0817BrikenTrypsinDIA09.x"
## [7] "s2018_0817BrikenTrypsinDIA11.x" "s2018_0817BrikenTrypsinDIA12.x"
## [9] "s2018_0817BrikenTrypsinDIA13.x" "s2018_0817BrikenTrypsinDIA17.x"
## [11] "s2018_0817BrikenTrypsinDIA18.x" "s2018_0817BrikenTrypsinDIA19.x"
## [13] "s2018_0817BrikenTrypsinDIA01.y" "s2018_0817BrikenTrypsinDIA02.y"
## [15] "s2018_0817BrikenTrypsinDIA03.y" "s2018_0817BrikenTrypsinDIA07.y"
## [17] "s2018_0817BrikenTrypsinDIA08.y" "s2018_0817BrikenTrypsinDIA09.y"
## [19] "s2018_0817BrikenTrypsinDIA11.y" "s2018_0817BrikenTrypsinDIA12.y"
## [21] "s2018_0817BrikenTrypsinDIA13.y" "s2018_0817BrikenTrypsinDIA17.y"
## [23] "s2018_0817BrikenTrypsinDIA18.y" "s2018_0817BrikenTrypsinDIA19.y"
## Force every column to numeric before transforming. Factor columns
## (which read.table may produce under R < 4.0) must be routed through
## as.character() first: as.numeric() on a factor returns the integer
## level codes, not the printed values. Numeric columns are passed to
## as.numeric() directly, so their values are untouched.
for (i in colnames(all_counts)) {
  col <- all_counts[[i]]
  if (is.factor(col)) {
    col <- as.character(col)
  }
  all_counts[[i]] <- as.numeric(col)
}
## log2(x + 1) keeps zeroes at zero while compressing the dynamic range.
all_counts <- log2(all_counts + 1)
## Compare the first sample as seen by openswath (col 1) and
## encyclopedia (col 13).
cor.test(all_counts[[1]], all_counts[[13]], method="spearman")
## Warning in cor.test.default(all_counts[[1]], all_counts[[13]], method =
## "spearman"): Cannot compute exact p-value with ties
##
## Spearman's rank correlation rho
##
## data: all_counts[[1]] and all_counts[[13]]
## S = 4.3e+08, p-value <2e-16
## alternative hypothesis: true rho is not equal to 0
## sample estimates:
## rho
## 0.8133
## Warning in plot_multihistogram(df): NAs introduced by coercion
## Used Bon Ferroni corrected t test(s) between columns.
## Flag which (protein, tool) entries are zero. tt comes from an
## upstream plotting call (not visible in this chunk); its scatter data
## appears to carry the two intensity columns plus a 'label' column --
## TODO confirm. Drop 'label' BEFORE renaming: the original order renamed
## first, which on a 3-column frame leaves the third column with an NA
## name, makes the label removal a no-op, and lets that column pollute
## the rowSums() below.
zero_mtrx <- as.data.frame(tt$scatter$data == 0)
zero_mtrx[["label"]] <- NULL
colnames(zero_mtrx) <- c("osw", "enc")
## Proteins reported as zero by both tools.
none_set <- rowSums(zero_mtrx) == 2
sum(none_set)
## [1] 784
## [1] 622
## [1] 986
## Attach the detection-category masks to the zero matrix.
## NOTE(review): one_set and both_set are not defined in this chunk;
## judging from the printed sums (622 = 572 + 50, and 986), they are
## presumably rowSums(zero_mtrx) == 1 (zero in exactly one tool) and
## == 0 (nonzero in both) from an upstream chunk -- confirm.
zero_mtrx[["none"]] <- none_set
zero_mtrx[["one"]] <- one_set
zero_mtrx[["both"]] <- both_set
## Proteins openswath reports as zero while encyclopedia quantified them.
osw_missing <- with(zero_mtrx, osw & one)
zero_mtrx[["osw_missing"]] <- osw_missing
sum(osw_missing)
## [1] 572
## Mirror image of osw_missing: proteins encyclopedia reports as zero
## while openswath quantified them.
enc_missing <- with(zero_mtrx, enc & one)
zero_mtrx[["enc_missing"]] <- enc_missing
sum(enc_missing)
## [1] 50
## Warning in cor.test.default(all_counts[[2]], all_counts[[14]], method =
## "spearman"): Cannot compute exact p-value with ties
##
## Spearman's rank correlation rho
##
## data: all_counts[[2]] and all_counts[[14]]
## S = 3.8e+08, p-value <2e-16
## alternative hypothesis: true rho is not equal to 0
## sample estimates:
## rho
## 0.8327
## Warning in plot_multihistogram(df): NAs introduced by coercion
## Used Bon Ferroni corrected t test(s) between columns.
## Warning in cor.test.default(all_counts[[3]], all_counts[[15]], method =
## "spearman"): Cannot compute exact p-value with ties
##
## Spearman's rank correlation rho
##
## data: all_counts[[3]] and all_counts[[15]]
## S = 4.2e+08, p-value <2e-16
## alternative hypothesis: true rho is not equal to 0
## sample estimates:
## rho
## 0.8173
## Warning in plot_multihistogram(df): NAs introduced by coercion
## Used Bon Ferroni corrected t test(s) between columns.
## Warning in cor.test.default(all_counts[[4]], all_counts[[16]], method =
## "spearman"): Cannot compute exact p-value with ties
##
## Spearman's rank correlation rho
##
## data: all_counts[[4]] and all_counts[[16]]
## S = 5.2e+08, p-value <2e-16
## alternative hypothesis: true rho is not equal to 0
## sample estimates:
## rho
## 0.7721
## Warning in lmrob.S(x, y, control = control): find_scale() did not converge
## in 'maxit.scale' (= 200) iterations with tol=1e-10, last rel.diff=0
## Warning in lmrob.S(x, y, control = control): find_scale() did not converge
## in 'maxit.scale' (= 200) iterations with tol=1e-10, last rel.diff=0
## Warning in lmrob.S(x, y, control = control): find_scale() did not converge
## in 'maxit.scale' (= 200) iterations with tol=1e-10, last rel.diff=0
## Warning in lmrob.S(x, y, control = control): find_scale() did not converge
## in 'maxit.scale' (= 200) iterations with tol=1e-10, last rel.diff=0
## Warning in lmrob.S(x, y, control = control): find_scale() did not converge
## in 'maxit.scale' (= 200) iterations with tol=1e-10, last rel.diff=0
## Warning in lmrob.S(x, y, control = control): find_scale() did not converge
## in 'maxit.scale' (= 200) iterations with tol=1e-10, last rel.diff=0
## Warning in lmrob.S(x, y, control = control): find_scale() did not converge
## in 'maxit.scale' (= 200) iterations with tol=1e-10, last rel.diff=0
## Warning in lmrob.S(x, y, control = control): find_scale() did not converge
## in 'maxit.scale' (= 200) iterations with tol=1e-10, last rel.diff=0
## Warning in lmrob.S(x, y, control = control): find_scale() did not converge
## in 'maxit.scale' (= 200) iterations with tol=1e-10, last rel.diff=0
## Warning in lmrob.S(x, y, control = control): find_scale() did not converge
## in 'maxit.scale' (= 200) iterations with tol=1e-10, last rel.diff=0
## Warning in lmrob.S(x, y, control = control): find_scale() did not converge
## in 'maxit.scale' (= 200) iterations with tol=1e-10, last rel.diff=0
## Warning in lmrob.S(x, y, control = control): find_scale() did not converge
## in 'maxit.scale' (= 200) iterations with tol=1e-10, last rel.diff=0
## Warning in lmrob.S(x, y, control = control): find_scale() did not converge
## in 'maxit.scale' (= 200) iterations with tol=1e-10, last rel.diff=0
## Warning in lmrob.S(x, y, control = control): find_scale() did not converge
## in 'maxit.scale' (= 200) iterations with tol=1e-10, last rel.diff=0
## Warning in lmrob.S(x, y, control = control): find_scale() did not converge
## in 'maxit.scale' (= 200) iterations with tol=1e-10, last rel.diff=0
## Warning in lmrob.S(x, y, control = control): find_scale() did not converge
## in 'maxit.scale' (= 200) iterations with tol=1e-10, last rel.diff=0
## Warning in lmrob.S(x, y, control = control): find_scale() did not converge
## in 'maxit.scale' (= 200) iterations with tol=1e-10, last rel.diff=0
## Warning in lmrob.S(x, y, control = control): find_scale() did not converge
## in 'maxit.scale' (= 200) iterations with tol=1e-10, last rel.diff=0
## Warning in lmrob.S(x, y, control = control): find_scale(*, initial_scale =
## 0) -> final scale = 0
## Warning in lmrob.S(x, y, control = control): find_scale() did not converge
## in 'maxit.scale' (= 200) iterations with tol=1e-10, last rel.diff=0
## Warning in lmrob.S(x, y, control = control): find_scale() did not converge
## in 'maxit.scale' (= 200) iterations with tol=1e-10, last rel.diff=0
## Warning in lmrob.S(x, y, control = control): find_scale(*, initial_scale =
## 0) -> final scale = 0
## Warning in lmrob.S(x, y, control = control): find_scale() did not converge
## in 'maxit.scale' (= 200) iterations with tol=1e-10, last rel.diff=0
## Warning in lmrob.S(x, y, control = control): find_scale() did not converge
## in 'maxit.scale' (= 200) iterations with tol=1e-10, last rel.diff=0
## Warning in lmrob.S(x, y, control = control): find_scale() did not converge
## in 'maxit.scale' (= 200) iterations with tol=1e-10, last rel.diff=0
## Warning in lmrob.S(x, y, control = control): find_scale() did not converge
## in 'maxit.scale' (= 200) iterations with tol=1e-10, last rel.diff=0
## Warning in lmrob.S(x, y, control = control): find_scale() did not converge
## in 'maxit.scale' (= 200) iterations with tol=1e-10, last rel.diff=0
## Warning in lmrob.S(x, y, control = control): find_scale() did not converge
## in 'maxit.scale' (= 200) iterations with tol=1e-10, last rel.diff=0
## Warning in lmrob.S(x, y, control = control): find_scale(*, initial_scale =
## 0) -> final scale = 0
## Warning in lmrob.S(x, y, control = control): find_scale(*, initial_scale =
## 0) -> final scale = 0
## Warning in lmrob.S(x, y, control = control): find_scale() did not converge
## in 'maxit.scale' (= 200) iterations with tol=1e-10, last rel.diff=0
## Warning in lmrob.S(x, y, control = control): find_scale() did not converge
## in 'maxit.scale' (= 200) iterations with tol=1e-10, last rel.diff=0
## Warning in lmrob.S(x, y, control = control): find_scale() did not converge
## in 'maxit.scale' (= 200) iterations with tol=1e-10, last rel.diff=0
## Warning in lmrob.S(x, y, control = control): find_scale() did not converge
## in 'maxit.scale' (= 200) iterations with tol=1e-10, last rel.diff=0
## Warning in lmrob.S(x, y, control = control): find_scale() did not converge
## in 'maxit.scale' (= 200) iterations with tol=1e-10, last rel.diff=0
## Warning in lmrob.S(x, y, control = control): find_scale() did not converge
## in 'maxit.scale' (= 200) iterations with tol=1e-10, last rel.diff=0
## Warning in lmrob.S(x, y, control = control): find_scale() did not converge
## in 'maxit.scale' (= 200) iterations with tol=1e-10, last rel.diff=0
## Warning in lmrob.S(x, y, control = control): find_scale() did not converge
## in 'maxit.scale' (= 200) iterations with tol=1e-10, last rel.diff=0
## Warning in lmrob.S(x, y, control = control): find_scale(*, initial_scale =
## 0) -> final scale = 0
## Warning in lmrob.S(x, y, control = control): find_scale(*, initial_scale =
## 0) -> final scale = 0
## Warning in lmrob.S(x, y, control = control): find_scale() did not converge
## in 'maxit.scale' (= 200) iterations with tol=1e-10, last rel.diff=0
## Warning in lmrob.S(x, y, control = control): find_scale() did not converge
## in 'maxit.scale' (= 200) iterations with tol=1e-10, last rel.diff=0
## Warning in lmrob.S(x, y, control = control): find_scale() did not converge
## in 'maxit.scale' (= 200) iterations with tol=1e-10, last rel.diff=0
## Warning in lmrob.S(x, y, control = control): find_scale() did not converge
## in 'maxit.scale' (= 200) iterations with tol=1e-10, last rel.diff=0
## Warning in lmrob.S(x, y, control = control): find_scale() did not converge
## in 'maxit.scale' (= 200) iterations with tol=1e-10, last rel.diff=0
## Warning in lmrob.S(x, y, control = control): find_scale() did not converge
## in 'maxit.scale' (= 200) iterations with tol=1e-10, last rel.diff=0
## Warning in lmrob.S(x, y, control = control): find_scale() did not converge
## in 'maxit.scale' (= 200) iterations with tol=1e-10, last rel.diff=0
## Warning in lmrob.S(x, y, control = control): find_scale() did not converge
## in 'maxit.scale' (= 200) iterations with tol=1e-10, last rel.diff=0
## Warning in lmrob.S(x, y, control = control): find_scale() did not converge
## in 'maxit.scale' (= 200) iterations with tol=1e-10, last rel.diff=0
## Warning in lmrob.S(x, y, control = control): find_scale() did not converge
## in 'maxit.scale' (= 200) iterations with tol=1e-10, last rel.diff=0
## Warning in lmrob.S(x, y, control = control): find_scale() did not converge
## in 'maxit.scale' (= 200) iterations with tol=1e-10, last rel.diff=0
## Warning in lmrob.S(x, y, control = control): find_scale() did not converge
## in 'maxit.scale' (= 200) iterations with tol=1e-10, last rel.diff=0
## Warning in lmrob.S(x, y, control = control): find_scale() did not converge
## in 'maxit.scale' (= 200) iterations with tol=1e-10, last rel.diff=0
## Warning in lmrob.S(x, y, control = control): find_scale() did not converge
## in 'maxit.scale' (= 200) iterations with tol=1e-10, last rel.diff=0
## Warning in outlierStats(ret, x, control): Detected possible local breakdown of SMDM-estimate in 2 coefficients 'Overall', ''.
## Use lmrob argument 'setting="KS2014"' to avoid this problem.
## Warning in plot_multihistogram(df): NAs introduced by coercion
## Used Bon Ferroni corrected t test(s) between columns.
## Warning in cor.test.default(all_counts[[5]], all_counts[[17]], method =
## "spearman"): Cannot compute exact p-value with ties
##
## Spearman's rank correlation rho
##
## data: all_counts[[5]] and all_counts[[17]]
## S = 4e+08, p-value <2e-16
## alternative hypothesis: true rho is not equal to 0
## sample estimates:
## rho
## 0.8254
## Warning in lmrob.S(x, y, control = control): find_scale() did not converge
## in 'maxit.scale' (= 200) iterations with tol=1e-10, last rel.diff=0
## Warning in lmrob.S(x, y, control = control): find_scale() did not converge
## in 'maxit.scale' (= 200) iterations with tol=1e-10, last rel.diff=0
## Warning in lmrob.S(x, y, control = control): find_scale() did not converge
## in 'maxit.scale' (= 200) iterations with tol=1e-10, last rel.diff=0
## Warning in lmrob.S(x, y, control = control): find_scale() did not converge
## in 'maxit.scale' (= 200) iterations with tol=1e-10, last rel.diff=0
## Warning in lmrob.S(x, y, control = control): find_scale() did not converge
## in 'maxit.scale' (= 200) iterations with tol=1e-10, last rel.diff=0
## Warning in lmrob.S(x, y, control = control): find_scale() did not converge
## in 'maxit.scale' (= 200) iterations with tol=1e-10, last rel.diff=0
## Warning in lmrob.S(x, y, control = control): find_scale() did not converge
## in 'maxit.scale' (= 200) iterations with tol=1e-10, last rel.diff=0
## Warning in lmrob.S(x, y, control = control): find_scale() did not converge
## in 'maxit.scale' (= 200) iterations with tol=1e-10, last rel.diff=0
## Warning in lmrob.S(x, y, control = control): find_scale() did not converge
## in 'maxit.scale' (= 200) iterations with tol=1e-10, last rel.diff=0
## Warning in lmrob.S(x, y, control = control): find_scale() did not converge
## in 'maxit.scale' (= 200) iterations with tol=1e-10, last rel.diff=0
## Warning in lmrob.S(x, y, control = control): find_scale() did not converge
## in 'maxit.scale' (= 200) iterations with tol=1e-10, last rel.diff=0
## Warning in lmrob.S(x, y, control = control): find_scale() did not converge
## in 'maxit.scale' (= 200) iterations with tol=1e-10, last rel.diff=0
## Warning in lmrob.S(x, y, control = control): find_scale() did not converge
## in 'maxit.scale' (= 200) iterations with tol=1e-10, last rel.diff=0
## Warning in lmrob.S(x, y, control = control): find_scale() did not converge
## in 'maxit.scale' (= 200) iterations with tol=1e-10, last rel.diff=0
## Warning in lmrob.S(x, y, control = control): find_scale() did not converge
## in 'maxit.scale' (= 200) iterations with tol=1e-10, last rel.diff=0
## Warning in lmrob.S(x, y, control = control): find_scale() did not converge
## in 'maxit.scale' (= 200) iterations with tol=1e-10, last rel.diff=0
## Warning in lmrob.S(x, y, control = control): find_scale() did not converge
## in 'maxit.scale' (= 200) iterations with tol=1e-10, last rel.diff=0
## Warning in lmrob.S(x, y, control = control): find_scale() did not converge
## in 'maxit.scale' (= 200) iterations with tol=1e-10, last rel.diff=0
## Warning in lmrob.S(x, y, control = control): find_scale() did not converge
## in 'maxit.scale' (= 200) iterations with tol=1e-10, last rel.diff=0
## Warning in lmrob.S(x, y, control = control): find_scale() did not converge
## in 'maxit.scale' (= 200) iterations with tol=1e-10, last rel.diff=0
## Warning in lmrob.S(x, y, control = control): find_scale() did not converge
## in 'maxit.scale' (= 200) iterations with tol=1e-10, last rel.diff=0
## Warning in lmrob.S(x, y, control = control): find_scale() did not converge
## in 'maxit.scale' (= 200) iterations with tol=1e-10, last rel.diff=0
## Warning in lmrob.S(x, y, control = control): find_scale() did not converge
## in 'maxit.scale' (= 200) iterations with tol=1e-10, last rel.diff=0
## Warning in lmrob.S(x, y, control = control): find_scale() did not converge
## in 'maxit.scale' (= 200) iterations with tol=1e-10, last rel.diff=0
## Warning in lmrob.S(x, y, control = control): find_scale() did not converge
## in 'maxit.scale' (= 200) iterations with tol=1e-10, last rel.diff=0
## Warning in lmrob.S(x, y, control = control): find_scale() did not converge
## in 'maxit.scale' (= 200) iterations with tol=1e-10, last rel.diff=0
## Warning in lmrob.S(x, y, control = control): find_scale() did not converge
## in 'maxit.scale' (= 200) iterations with tol=1e-10, last rel.diff=0
## Warning in lmrob.S(x, y, control = control): find_scale() did not converge
## in 'maxit.scale' (= 200) iterations with tol=1e-10, last rel.diff=0
## Warning in lmrob.S(x, y, control = control): find_scale() did not converge
## in 'maxit.scale' (= 200) iterations with tol=1e-10, last rel.diff=0
## Warning in lmrob.S(x, y, control = control): find_scale() did not converge
## in 'maxit.scale' (= 200) iterations with tol=1e-10, last rel.diff=0
## Warning in lmrob.S(x, y, control = control): find_scale() did not converge
## in 'maxit.scale' (= 200) iterations with tol=1e-10, last rel.diff=0
## Warning in lmrob.S(x, y, control = control): find_scale() did not converge
## in 'maxit.scale' (= 200) iterations with tol=1e-10, last rel.diff=0
## Warning in lmrob.S(x, y, control = control): find_scale() did not converge
## in 'maxit.scale' (= 200) iterations with tol=1e-10, last rel.diff=0
## Warning in lmrob.S(x, y, control = control): find_scale() did not converge
## in 'maxit.scale' (= 200) iterations with tol=1e-10, last rel.diff=0
## Warning in lmrob.S(x, y, control = control): find_scale() did not converge
## in 'maxit.scale' (= 200) iterations with tol=1e-10, last rel.diff=0
## Warning in lmrob.S(x, y, control = control): find_scale() did not converge
## in 'maxit.scale' (= 200) iterations with tol=1e-10, last rel.diff=0
## Warning in lmrob.S(x, y, control = control): find_scale() did not converge
## in 'maxit.scale' (= 200) iterations with tol=1e-10, last rel.diff=0
## Warning in lmrob.S(x, y, control = control): find_scale() did not converge
## in 'maxit.scale' (= 200) iterations with tol=1e-10, last rel.diff=0
## Warning in lmrob.S(x, y, control = control): find_scale() did not converge
## in 'maxit.scale' (= 200) iterations with tol=1e-10, last rel.diff=0
## Warning in lmrob.S(x, y, control = control): find_scale() did not converge
## in 'maxit.scale' (= 200) iterations with tol=1e-10, last rel.diff=0
## Warning in lmrob.S(x, y, control = control): find_scale() did not converge
## in 'maxit.scale' (= 200) iterations with tol=1e-10, last rel.diff=0
## Warning in lmrob.S(x, y, control = control): find_scale() did not converge
## in 'maxit.scale' (= 200) iterations with tol=1e-10, last rel.diff=0
## Warning in lmrob.S(x, y, control = control): find_scale() did not converge
## in 'maxit.scale' (= 200) iterations with tol=1e-10, last rel.diff=0
## Warning in lmrob.S(x, y, control = control): find_scale() did not converge
## in 'maxit.scale' (= 200) iterations with tol=1e-10, last rel.diff=0
## Warning in lmrob.S(x, y, control = control): find_scale() did not converge
## in 'maxit.scale' (= 200) iterations with tol=1e-10, last rel.diff=0
## Warning in lmrob.S(x, y, control = control): find_scale() did not converge
## in 'maxit.scale' (= 200) iterations with tol=1e-10, last rel.diff=0
## Warning in lmrob.S(x, y, control = control): find_scale() did not converge
## in 'maxit.scale' (= 200) iterations with tol=1e-10, last rel.diff=0
## Warning in lmrob.S(x, y, control = control): find_scale() did not converge
## in 'maxit.scale' (= 200) iterations with tol=1e-10, last rel.diff=0
## Warning in lmrob.S(x, y, control = control): find_scale() did not converge
## in 'maxit.scale' (= 200) iterations with tol=1e-10, last rel.diff=0
## Warning in lmrob.S(x, y, control = control): find_scale() did not converge
## in 'maxit.scale' (= 200) iterations with tol=1e-10, last rel.diff=0
## Warning in lmrob.S(x, y, control = control): find_scale() did not converge
## in 'maxit.scale' (= 200) iterations with tol=1e-10, last rel.diff=0
## Warning in lmrob.S(x, y, control = control): find_scale() did not converge
## in 'maxit.scale' (= 200) iterations with tol=1e-10, last rel.diff=0
## Warning in lmrob.S(x, y, control = control): find_scale() did not converge
## in 'maxit.scale' (= 200) iterations with tol=1e-10, last rel.diff=0
## Warning in lmrob.S(x, y, control = control): find_scale() did not converge
## in 'maxit.scale' (= 200) iterations with tol=1e-10, last rel.diff=0
## Warning in plot_multihistogram(df): NAs introduced by coercion
## Used Bon Ferroni corrected t test(s) between columns.
## Warning in cor.test.default(all_counts[[6]], all_counts[[18]], method =
## "spearman"): Cannot compute exact p-value with ties
##
## Spearman's rank correlation rho
##
## data: all_counts[[6]] and all_counts[[18]]
## S = 4e+08, p-value <2e-16
## alternative hypothesis: true rho is not equal to 0
## sample estimates:
## rho
## 0.8236
## Warning in cor.test.default(all_counts[[7]], all_counts[[19]], method =
## "spearman"): Cannot compute exact p-value with ties
##
## Spearman's rank correlation rho
##
## data: all_counts[[7]] and all_counts[[19]]
## S = 1.6e+08, p-value <2e-16
## alternative hypothesis: true rho is not equal to 0
## sample estimates:
## rho
## 0.9318
## Warning in plot_multihistogram(df): NAs introduced by coercion
## Used Bon Ferroni corrected t test(s) between columns.
## Warning in cor.test.default(all_counts[[8]], all_counts[[20]], method =
## "spearman"): Cannot compute exact p-value with ties
##
## Spearman's rank correlation rho
##
## data: all_counts[[8]] and all_counts[[20]]
## S = 1.6e+08, p-value <2e-16
## alternative hypothesis: true rho is not equal to 0
## sample estimates:
## rho
## 0.929
## Warning in plot_multihistogram(df): NAs introduced by coercion
## Used Bon Ferroni corrected t test(s) between columns.
## Warning in cor.test.default(all_counts[[9]], all_counts[[21]], method =
## "spearman"): Cannot compute exact p-value with ties
##
## Spearman's rank correlation rho
##
## data: all_counts[[9]] and all_counts[[21]]
## S = 1.5e+08, p-value <2e-16
## alternative hypothesis: true rho is not equal to 0
## sample estimates:
## rho
## 0.9334
## Warning in plot_multihistogram(df): NAs introduced by coercion
## Used Bon Ferroni corrected t test(s) between columns.
## Warning in cor.test.default(all_counts[[10]], all_counts[[22]], method =
## "spearman"): Cannot compute exact p-value with ties
##
## Spearman's rank correlation rho
##
## data: all_counts[[10]] and all_counts[[22]]
## S = 2.3e+08, p-value <2e-16
## alternative hypothesis: true rho is not equal to 0
## sample estimates:
## rho
## 0.8979
## Warning in plot_multihistogram(df): NAs introduced by coercion
## Used Bon Ferroni corrected t test(s) between columns.
## Warning in cor.test.default(all_counts[[11]], all_counts[[23]], method =
## "spearman"): Cannot compute exact p-value with ties
##
## Spearman's rank correlation rho
##
## data: all_counts[[11]] and all_counts[[23]]
## S = 1.7e+08, p-value <2e-16
## alternative hypothesis: true rho is not equal to 0
## sample estimates:
## rho
## 0.9269
## Warning in plot_multihistogram(df): NAs introduced by coercion
## Used Bon Ferroni corrected t test(s) between columns.
## Warning in cor.test.default(all_counts[[12]], all_counts[[24]], method =
## "spearman"): Cannot compute exact p-value with ties
##
## Spearman's rank correlation rho
##
## data: all_counts[[12]] and all_counts[[24]]
## S = 1.8e+08, p-value <2e-16
## alternative hypothesis: true rho is not equal to 0
## sample estimates:
## rho
## 0.9194
## Warning in plot_multihistogram(df): NAs introduced by coercion
## Used Bon Ferroni corrected t test(s) between columns.
OK, so that was quite strange. Next, let us set to NA all the entries which are currently 0.
I suspect that somewhere along the way a set of samples was mis-ordered.
## Treat zero intensities as missing rather than observed-at-zero (after
## log2(x + 1), zeroes are still exactly 0), so they no longer produce
## massive rank ties, then re-check one of the sample pairs.
all_counts[all_counts == 0] <- NA
cor.test(all_counts[[7]], all_counts[[19]], method="spearman")
##
## Spearman's rank correlation rho
##
## data: all_counts[[7]] and all_counts[[19]]
## S = 1.2e+08, p-value <2e-16
## alternative hypothesis: true rho is not equal to 0
## sample estimates:
## rho
## 0.9363
## Warning in plot_multihistogram(df): NAs introduced by coercion
## Used Bon Ferroni corrected t test(s) between columns.
## Warning in cor.test.default(all_counts[[8]], all_counts[[20]], method =
## "spearman"): Cannot compute exact p-value with ties
##
## Spearman's rank correlation rho
##
## data: all_counts[[8]] and all_counts[[20]]
## S = 1.2e+08, p-value <2e-16
## alternative hypothesis: true rho is not equal to 0
## sample estimates:
## rho
## 0.9347
## Warning in plot_multihistogram(df): NAs introduced by coercion
## Used Bon Ferroni corrected t test(s) between columns.
##
## Spearman's rank correlation rho
##
## data: all_counts[[9]] and all_counts[[21]]
## S = 1.2e+08, p-value <2e-16
## alternative hypothesis: true rho is not equal to 0
## sample estimates:
## rho
## 0.9374
## Warning in plot_multihistogram(df): NAs introduced by coercion
## Used Bon Ferroni corrected t test(s) between columns.
## Warning in cor.test.default(all_counts[[10]], all_counts[[22]], method =
## "spearman"): Cannot compute exact p-value with ties
##
## Spearman's rank correlation rho
##
## data: all_counts[[10]] and all_counts[[22]]
## S = 1.4e+08, p-value <2e-16
## alternative hypothesis: true rho is not equal to 0
## sample estimates:
## rho
## 0.9078
## Warning in plot_multihistogram(df): NAs introduced by coercion
## Used Bon Ferroni corrected t test(s) between columns.
##
## Spearman's rank correlation rho
##
## data: all_counts[[11]] and all_counts[[23]]
## S = 1.4e+08, p-value <2e-16
## alternative hypothesis: true rho is not equal to 0
## sample estimates:
## rho
## 0.9273
## Warning in plot_multihistogram(df): NAs introduced by coercion
## Used Bon Ferroni corrected t test(s) between columns.
##
## Spearman's rank correlation rho
##
## data: all_counts[[12]] and all_counts[[24]]
## S = 1.4e+08, p-value <2e-16
## alternative hypothesis: true rho is not equal to 0
## sample estimates:
## rho
## 0.9231
## Warning in plot_multihistogram(df): NAs introduced by coercion
## Used Bon Ferroni corrected t test(s) between columns.
Genefilter is a filtering package for RNA-seq data. It has a large series of metrics for choosing parameters to help choose which data points to remove. I think some of these parameters are likely to be of interest for this data and might handle this observation bias.
## Run the genefilter independent-filtering diagnostics (adapted from the
## genefilter vignette) on the protein data: fit a DESeq2 model, then
## examine how a mean-count filter statistic relates to the Wald p-values.
expressionset <- expt[["expressionset"]]
ready <- expressionset_to_deseq(expressionset)
cds <- DESeq2::estimateSizeFactors(ready)
cds <- DESeq2::estimateDispersions(cds)
## nbinomWaldTest is exported by DESeq2, so use :: rather than reaching
## into the namespace with :::.
fit1 <- DESeq2::nbinomWaldTest(cds)
filterstat <- BiocGenerics::rowMeans(DESeq2::counts(cds))
pvalues <- DESeq2::results(fit1)[["pvalue"]]
res <- data.frame(
  filterstat = filterstat,
  pvalue = pvalues,
  row.names = names(cds))
## Rank-vs-significance scatter: a useful filter statistic should sweep
## the small p-values toward the high-rank (right) side.
with(res,
     plot(rank(filterstat)/length(filterstat), -log10(pvalue), pch=16, cex=0.45))
## ECDF of the (log-scaled) filter statistic.
trsf <- function(n) log10(n + 1)
plot(ecdf(trsf(res$filterstat)), xlab=body(trsf), main="")
## Deliberately bad filter statistic, taken verbatim from the vignette.
## NOTE(review): the 'FBgn' pattern is a fly-gene leftover from that
## vignette and will not strip anything from these protein IDs, so
## badfilter is likely all NA here -- confirm this is intended.
badfilter <- as.numeric(gsub("[+]*FBgn", "", rownames(res)))
plot(rank(badfilter)/length(badfilter), -log10(res$pvalue), pch=16, cex=0.45)
## How many BH rejections survive at each filtering threshold theta?
theta <- seq(from=0, to=0.5, by=0.1)
pBH <- genefilter::filtered_p(filter=res$filterstat, test=res$pvalue, theta=theta, method="BH")
genefilter::rejection_plot(pBH, at="sample",
                           xlim=c(0, 0.5), ylim=c(0, 2000),
                           xlab="FDR cutoff (Benjamini & Hochberg adjusted p-value)", main="")
## Reload the openswath expt, this time into the global environment so
## that 'expt' is directly visible, and keep only the whole-cell samples.
loaded <- load("protein_expt-v20181112.rda")
## subset_expt() is a project helper; the printed messages below report
## dropping from 12 to 6 samples.
whole <- subset_expt(expt, subset="collectiontype=='whole'")
## There were 12, now there are 6 samples.
## There were 12, now there are 6 samples.
## This function will replace the expt$expressionset slot with:
## pofa(data)
## It backs up the current data into a slot named:
## expt$backup_expressionset. It will also save copies of each step along the way
## in expt$normalized with the corresponding libsizes. Keep the libsizes in mind
## when invoking limma. The appropriate libsize is the non-log(cpm(normalized)).
## This is most likely kept at:
## 'new_expt$normalized$intermediate_counts$normalization$libsizes'
## A copy of this may also be found at:
## new_expt$best_libsize
## Leaving the data in its current base format, keep in mind that
## some metrics are easier to see when the data is log2 transformed, but
## EdgeR/DESeq do not accept transformed data.
## Leaving the data unconverted. It is often advisable to cpm/rpkm
## the data to normalize for sampling differences, keep in mind though that rpkm
## has some annoying biases, and voom() by default does a cpm (though hpgl_voom()
## will try to detect this).
## Leaving the data unnormalized. This is necessary for DESeq, but
## EdgeR/limma might benefit from normalization. Good choices include quantile,
## size-factor, tmm, etc.
## Not correcting the count-data for batch effects. If batch is
## included in EdgerR/limma's model, then this is probably wise; but in extreme
## batch effects this is a good parameter to play with.
## Step 1: performing count filter with option: pofa
## Removing 621 low-count genes (1982 remaining).
## Step 2: not normalizing the data.
## Step 3: not converting the data.
## Step 4: not transforming the data.
## Step 5: not doing batch correction.
## This function will replace the expt$expressionset slot with:
## pofa(data)
## It backs up the current data into a slot named:
## expt$backup_expressionset. It will also save copies of each step along the way
## in expt$normalized with the corresponding libsizes. Keep the libsizes in mind
## when invoking limma. The appropriate libsize is the non-log(cpm(normalized)).
## This is most likely kept at:
## 'new_expt$normalized$intermediate_counts$normalization$libsizes'
## A copy of this may also be found at:
## new_expt$best_libsize
## Leaving the data in its current base format, keep in mind that
## some metrics are easier to see when the data is log2 transformed, but
## EdgeR/DESeq do not accept transformed data.
## Leaving the data unconverted. It is often advisable to cpm/rpkm
## the data to normalize for sampling differences, keep in mind though that rpkm
## has some annoying biases, and voom() by default does a cpm (though hpgl_voom()
## will try to detect this).
## Leaving the data unnormalized. This is necessary for DESeq, but
## EdgeR/limma might benefit from normalization. Good choices include quantile,
## size-factor, tmm, etc.
## Not correcting the count-data for batch effects. If batch is
## included in EdgerR/limma's model, then this is probably wise; but in extreme
## batch effects this is a good parameter to play with.
## Step 1: performing count filter with option: pofa
## Removing 2025 low-count genes (578 remaining).
## Step 2: not normalizing the data.
## Step 3: not converting the data.
## Step 4: not transforming the data.
## Step 5: not doing batch correction.
## This function will replace the expt$expressionset slot with:
## log2(cpm(quant(cbcb(data))))
## It backs up the current data into a slot named:
## expt$backup_expressionset. It will also save copies of each step along the way
## in expt$normalized with the corresponding libsizes. Keep the libsizes in mind
## when invoking limma. The appropriate libsize is the non-log(cpm(normalized)).
## This is most likely kept at:
## 'new_expt$normalized$intermediate_counts$normalization$libsizes'
## A copy of this may also be found at:
## new_expt$best_libsize
## Not correcting the count-data for batch effects. If batch is
## included in EdgerR/limma's model, then this is probably wise; but in extreme
## batch effects this is a good parameter to play with.
## Step 1: performing count filter with option: cbcb
## Removing 0 low-count genes (1982 remaining).
## Step 2: normalizing the data with quant.
## Using normalize.quantiles.robust due to a thread error in preprocessCore.
## Step 3: converting the data with cpm.
## Step 4: transforming the data with log2.
## Step 5: not doing batch correction.
## Plotting a PCA before surrogates/batch inclusion.
## Assuming no batch in model for testing pca.
## Starting basic_pairwise().
## Starting basic pairwise comparison.
## Leaving the data alone, regardless of normalization state.
## Basic step 0/3: Transforming data.
## Basic step 1/3: Creating median and variance tables.
## Basic step 2/3: Performing 3 comparisons.
## Basic step 3/3: Creating faux DE Tables.
## Basic: Returning tables.
## Starting deseq_pairwise().
## Starting DESeq2 pairwise comparisons.
## About to round the data, this is a pretty terrible thing to do. But if you, like me, want to see what happens when you put non-standard data into deseq, then here you go.
## Warning in choose_binom_dataset(input, force = force): This data was
## inappropriately forced into integers.
## Choosing the non-intercept containing model.
## DESeq2 step 1/5: Including only condition in the deseq model.
## Warning in import_deseq(data, column_data, model_string, tximport =
## input[["tximport"]][["raw"]]): Converted down 233 elements because they are
## larger than the maximum integer size.
## converting counts to integer mode
## DESeq2 step 2/5: Estimate size factors.
## DESeq2 step 3/5: Estimate dispersions.
## gene-wise dispersion estimates
## mean-dispersion relationship
## final dispersion estimates
## Using a parametric fitting seems to have worked.
## DESeq2 step 4/5: nbinomWaldTest.
## Starting ebseq_pairwise().
## The data should be suitable for EdgeR/DESeq/EBSeq. If they freak out, check the state of the count table and ensure that it is in integer counts.
## Starting EBSeq pairwise subset.
## Choosing the non-intercept containing model.
## Starting EBTest of delta_whole vs. wt_whole.
## Copying ppee values as ajusted p-values until I figure out how to deal with them.
## Starting edger_pairwise().
## Starting edgeR pairwise comparisons.
## About to round the data, this is a pretty terrible thing to do. But if you, like me, want to see what happens when you put non-standard data into deseq, then here you go.
## Warning in choose_binom_dataset(input, force = force): This data was
## inappropriately forced into integers.
## Choosing the non-intercept containing model.
## EdgeR step 1/9: Importing and normalizing data.
## EdgeR step 2/9: Estimating the common dispersion.
## EdgeR step 3/9: Estimating dispersion across genes.
## EdgeR step 4/9: Estimating GLM Common dispersion.
## EdgeR step 5/9: Estimating GLM Trended dispersion.
## EdgeR step 6/9: Estimating GLM Tagged dispersion.
## EdgeR step 7/9: Running glmFit, switch to glmQLFit by changing the argument 'edger_test'.
## EdgeR step 8/9: Making pairwise contrasts.
## Starting limma_pairwise().
## Starting limma pairwise comparison.
## Leaving the data alone, regardless of normalization state.
## libsize was not specified, this parameter has profound effects on limma's result.
## Using the libsize from expt$best_libsize.
## Limma step 1/6: choosing model.
## Choosing the non-intercept containing model.
## Limma step 2/6: running limma::voom(), switch with the argument 'which_voom'.
## Using normalize.method=quantile for voom.
## Limma step 3/6: running lmFit with method: ls.
## Limma step 4/6: making and fitting contrasts with no intercept. (~ 0 + factors)
## Limma step 5/6: Running eBayes with robust=FALSE and trend=FALSE.
## Limma step 6/6: Writing limma outputs.
## Limma step 6/6: 1/1: Creating table: wt_whole_vs_delta_whole. Adjust=BH
## Limma step 6/6: 1/2: Creating table: delta_whole. Adjust=BH
## Limma step 6/6: 2/2: Creating table: wt_whole. Adjust=BH
## Comparing analyses.
## Deleting the file excel/whole_filtered.xlsx before writing the tables.
## Writing a legend of columns.
## Printing a pca plot before/after surrogates/batch estimation.
## Working on table 1/1: wt_whole_vs_delta_whole
## 20181210 a pthread error in normalize.quantiles leads me to robust.
## Used Bon Ferroni corrected t test(s) between columns.
## Used Bon Ferroni corrected t test(s) between columns.
## Used Bon Ferroni corrected t test(s) between columns.
## Adding venn plots for wt_whole_vs_delta_whole.
## Limma expression coefficients for wt_whole_vs_delta_whole; R^2: 0.908; equation: y = 0.966x + 0.24
## Edger expression coefficients for wt_whole_vs_delta_whole; R^2: 0.917; equation: y = 0.959x + 0.211
## DESeq2 expression coefficients for wt_whole_vs_delta_whole; R^2: 0.914; equation: y = 0.952x + 1.24
## Writing summary information.
## Attempting to add the comparison plot to pairwise_summary at row: 23 and column: 1
## Performing save of the workbook.
## Note: zip::zip() is deprecated, please use zip::zipr() instead
## Reuse the sample metadata and protein annotations from the existing
## (openswath-derived) expt so the encyclopedia matrix gets identical
## sample/feature bookkeeping and the two experiments stay comparable.
metadata <- pData(expt)  # per-sample metadata (Biobase phenoData accessor)
mtb_annotations <- fData(expt)  # per-protein annotations (Biobase featureData accessor)
## Build a new expt from the encyclopedia protein quantification matrix.
## create_expt() is presumably the hpgltools constructor -- confirm; it saves
## the result to the versioned .rda named here ('ver' must exist in the session).
encyc_expt <- create_expt(metadata, savefile=glue::glue("encyclopedia_expt-v{ver}.rda"),
count_dataframe=enc_matrix, gene_info=mtb_annotations)
## Reading the sample metadata.
## Warning in `[<-.factor`(`*tmp*`, iseq, value = c("undefined",
## "undefined", : invalid factor level, NA generated
## Warning in `[<-.factor`(`*tmp*`, iseq, value = c("undefined",
## "undefined", : invalid factor level, NA generated
## The sample definitions comprises: 12 rows(samples) and 29 columns(metadata fields).
## Matched 2392 annotations and counts.
## Bringing together the count matrix and gene information.
## Some annotations were lost in merging, setting them to 'undefined'.
## There were 12, now there are 6 samples.
## This function will replace the expt$expressionset slot with:
## pofa(data)
## It backs up the current data into a slot named:
## expt$backup_expressionset. It will also save copies of each step along the way
## in expt$normalized with the corresponding libsizes. Keep the libsizes in mind
## when invoking limma. The appropriate libsize is the non-log(cpm(normalized)).
## This is most likely kept at:
## 'new_expt$normalized$intermediate_counts$normalization$libsizes'
## A copy of this may also be found at:
## new_expt$best_libsize
## Leaving the data in its current base format, keep in mind that
## some metrics are easier to see when the data is log2 transformed, but
## EdgeR/DESeq do not accept transformed data.
## Leaving the data unconverted. It is often advisable to cpm/rpkm
## the data to normalize for sampling differences, keep in mind though that rpkm
## has some annoying biases, and voom() by default does a cpm (though hpgl_voom()
## will try to detect this).
## Leaving the data unnormalized. This is necessary for DESeq, but
## EdgeR/limma might benefit from normalization. Good choices include quantile,
## size-factor, tmm, etc.
## Not correcting the count-data for batch effects. If batch is
## included in EdgerR/limma's model, then this is probably wise; but in extreme
## batch effects this is a good parameter to play with.
## Step 1: performing count filter with option: pofa
## Removing 276 low-count genes (2235 remaining).
## Step 2: not normalizing the data.
## Step 3: not converting the data.
## Step 4: not transforming the data.
## Step 5: not doing batch correction.
## This function will replace the expt$expressionset slot with:
## log2(cpm(quant(cbcb(data))))
## It backs up the current data into a slot named:
## expt$backup_expressionset. It will also save copies of each step along the way
## in expt$normalized with the corresponding libsizes. Keep the libsizes in mind
## when invoking limma. The appropriate libsize is the non-log(cpm(normalized)).
## This is most likely kept at:
## 'new_expt$normalized$intermediate_counts$normalization$libsizes'
## A copy of this may also be found at:
## new_expt$best_libsize
## Not correcting the count-data for batch effects. If batch is
## included in EdgerR/limma's model, then this is probably wise; but in extreme
## batch effects this is a good parameter to play with.
## Step 1: performing count filter with option: cbcb
## Removing 8 low-count genes (2227 remaining).
## Step 2: normalizing the data with quant.
## Using normalize.quantiles.robust due to a thread error in preprocessCore.
## Step 3: converting the data with cpm.
## Step 4: transforming the data with log2.
## Step 5: not doing batch correction.
## Plotting a PCA before surrogates/batch inclusion.
## Assuming no batch in model for testing pca.
## Starting basic_pairwise().
## Starting basic pairwise comparison.
## Leaving the data alone, regardless of normalization state.
## Basic step 0/3: Transforming data.
## Basic step 1/3: Creating median and variance tables.
## Basic step 2/3: Performing 3 comparisons.
## Basic step 3/3: Creating faux DE Tables.
## Basic: Returning tables.
## Starting deseq_pairwise().
## Starting DESeq2 pairwise comparisons.
## About to round the data, this is a pretty terrible thing to do. But if you, like me, want to see what happens when you put non-standard data into deseq, then here you go.
## Warning in choose_binom_dataset(input, force = force): This data was
## inappropriately forced into integers.
## Choosing the non-intercept containing model.
## DESeq2 step 1/5: Including only condition in the deseq model.
## Warning in import_deseq(data, column_data, model_string, tximport =
## input[["tximport"]][["raw"]]): Converted down 603 elements because they are
## larger than the maximum integer size.
## converting counts to integer mode
## DESeq2 step 2/5: Estimate size factors.
## DESeq2 step 3/5: Estimate dispersions.
## gene-wise dispersion estimates
## mean-dispersion relationship
## final dispersion estimates
## Using a parametric fitting seems to have worked.
## DESeq2 step 4/5: nbinomWaldTest.
## Starting ebseq_pairwise().
## The data should be suitable for EdgeR/DESeq/EBSeq. If they freak out, check the state of the count table and ensure that it is in integer counts.
## Starting EBSeq pairwise subset.
## Choosing the non-intercept containing model.
## Starting EBTest of delta_whole vs. wt_whole.
## Copying ppee values as ajusted p-values until I figure out how to deal with them.
## Starting edger_pairwise().
## Starting edgeR pairwise comparisons.
## About to round the data, this is a pretty terrible thing to do. But if you, like me, want to see what happens when you put non-standard data into deseq, then here you go.
## Warning in choose_binom_dataset(input, force = force): This data was
## inappropriately forced into integers.
## Choosing the non-intercept containing model.
## EdgeR step 1/9: Importing and normalizing data.
## EdgeR step 2/9: Estimating the common dispersion.
## EdgeR step 3/9: Estimating dispersion across genes.
## EdgeR step 4/9: Estimating GLM Common dispersion.
## EdgeR step 5/9: Estimating GLM Trended dispersion.
## EdgeR step 6/9: Estimating GLM Tagged dispersion.
## EdgeR step 7/9: Running glmFit, switch to glmQLFit by changing the argument 'edger_test'.
## EdgeR step 8/9: Making pairwise contrasts.
## Starting limma_pairwise().
## Starting limma pairwise comparison.
## Leaving the data alone, regardless of normalization state.
## libsize was not specified, this parameter has profound effects on limma's result.
## Using the libsize from expt$best_libsize.
## Limma step 1/6: choosing model.
## Choosing the non-intercept containing model.
## Limma step 2/6: running limma::voom(), switch with the argument 'which_voom'.
## Using normalize.method=quantile for voom.
## Limma step 3/6: running lmFit with method: ls.
## Limma step 4/6: making and fitting contrasts with no intercept. (~ 0 + factors)
## Limma step 5/6: Running eBayes with robust=FALSE and trend=FALSE.
## Limma step 6/6: Writing limma outputs.
## Limma step 6/6: 1/1: Creating table: wt_whole_vs_delta_whole. Adjust=BH
## Limma step 6/6: 1/2: Creating table: delta_whole. Adjust=BH
## Limma step 6/6: 2/2: Creating table: wt_whole. Adjust=BH
## Comparing analyses.
## Deleting the file excel/encyc_whole_filtered.xlsx before writing the tables.
## Writing a legend of columns.
## Printing a pca plot before/after surrogates/batch estimation.
## Working on table 1/1: wt_whole_vs_delta_whole
## 20181210 a pthread error in normalize.quantiles leads me to robust.
## Used Bon Ferroni corrected t test(s) between columns.
## Used Bon Ferroni corrected t test(s) between columns.
## Used Bon Ferroni corrected t test(s) between columns.
## Adding venn plots for wt_whole_vs_delta_whole.
## Limma expression coefficients for wt_whole_vs_delta_whole; R^2: 0.947; equation: y = 0.977x + 0.154
## Edger expression coefficients for wt_whole_vs_delta_whole; R^2: 0.949; equation: y = 0.972x + 0.156
## DESeq2 expression coefficients for wt_whole_vs_delta_whole; R^2: 0.945; equation: y = 0.965x + 0.969
## Writing summary information.
## Attempting to add the comparison plot to pairwise_summary at row: 23 and column: 1
## Performing save of the workbook.
## Compare the edgeR/DESeq2 log2 fold changes called from the openswath
## matrix against those called from the encyclopedia matrix, for the first
## (only) contrast table of each analysis.
osw <- whole_table[["data"]][[1]][, c("edger_logfc", "deseq_logfc")]
enc <- encyc_table[["data"]][[1]][, c("edger_logfc", "deseq_logfc")]
## Both inputs share column names, so merge() on rownames yields the columns:
## Row.names, edger_logfc.x, deseq_logfc.x, edger_logfc.y, deseq_logfc.y.
test <- merge(osw, enc, by="row.names")
## Select the two edgeR logFC columns by name instead of by position
## (was test[, c(2, 4)]): identical result, but robust to any future change
## in merge()'s column ordering and self-documenting about what is plotted.
tt <- plot_linear_scatter(test[, c("edger_logfc.x", "edger_logfc.y")])
## Used Bon Ferroni corrected t test(s) between columns.
## Testing method: limma.
## Adding method: limma to the set.
## Testing method: deseq.
## Adding method: deseq to the set.
## Testing method: edger.
## Adding method: edger to the set.
## Testing method: ebseq.
## Adding method: ebseq to the set.
## Testing method: basic.
## Adding method: basic to the set.