I want to perform a series of comparisons between the host cells of the two species: human and mouse. Thus I need to collect annotation data for both species and the set of orthologs between them.
## The biomart annotations file already exists, loading from it.
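The hs_annot table itself was generated in a previous session and cached; only the reload is shown here. For reference, a minimal sketch of the kind of biomaRt query that produces such a table; the attribute list is an assumption based on the columns used below:

library(biomaRt)
# Hedged sketch, not the cached query itself: the attributes are guesses
# based on the columns referenced below.
hs_mart <- useMart("ensembl", dataset="hsapiens_gene_ensembl")
hs_annot <- getBM(attributes=c("ensembl_gene_id", "ensembl_transcript_id",
                               "transcript_version", "hgnc_symbol", "description"),
                  mart=hs_mart)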
rownames(hs_annot) <- make.names(
paste0(hs_annot[["ensembl_transcript_id"]], ".",
hs_annot[["transcript_version"]]),
unique=TRUE)
hs_tx_gene <- hs_annot[, c("ensembl_gene_id", "ensembl_transcript_id")]
hs_tx_gene[["id"]] <- rownames(hs_tx_gene)
hs_tx_gene <- hs_tx_gene[, c("id", "ensembl_gene_id")]
new_hs_annot <- hs_annot
rownames(new_hs_annot) <- make.names(hs_annot[["ensembl_gene_id"]], unique=TRUE)

The question is reasonably self-contained. I want to compare the uninfected human samples against any samples which were infected for 4 hours. So let us first pull those samples and then poke at them a bit.
sample_sheet <- "sample_sheets/leishmania_host_metasheet_20190401.xlsx"
hs_expt <- create_expt(sample_sheet,
file_column="hsapiensfile",
gene_info=new_hs_annot,
tx_gene_map=hs_tx_gene)
## Reading the sample metadata.
## The sample definitions comprises: 437 rows(samples) and 55 columns(metadata fields).
## Reading count tables.
## Using the transcript to gene mapping.
## Reading salmon data with tximport.
## Finished reading count tables.
## Matched 19629 annotations and counts.
## Bringing together the count matrix and gene information.
## The mapped IDs are not the rownames of your gene information, changing them now.
## Some annotations were lost in merging, setting them to 'undefined'.
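Under the hood, create_expt() hands the salmon quantifications to tximport along with the transcript-to-gene map built above. A rough sketch of that step; sample_df and its sampleid column are hypothetical stand-ins for the parsed sample sheet:

library(tximport)
# 'sample_df' and the 'sampleid' column are hypothetical; 'hsapiensfile'
# mirrors the file_column argument above and should hold salmon quant.sf paths.
files <- sample_df[["hsapiensfile"]]
names(files) <- sample_df[["sampleid"]]
txi <- tximport(files, type="salmon", tx2gene=hs_tx_gene)
counts <- txi[["counts"]]  # gene-level counts after the tx->gene mapping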
## There were 267, now there are 247 samples.
## There were 247, now there are 64 samples.
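The subsetting that produced the two messages above was not echoed. As a hedged reconstruction of its general shape, using the subset_expt() syntax that appears later in this document (the filter string here is hypothetical):

# Hypothetical: keep only the 4-hour samples; the real filter expression
# was not echoed above.
hs_t4h_expt <- subset_expt(hs_expt, subset="expttime=='t4h'")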
hs_t4h_expt <- set_expt_conditions(hs_t4h_expt, fact="infectstate")
hs_t4h_expt <- set_expt_batches(hs_t4h_expt, fact="study")
table(hs_t4h_expt$conditions)
##
## bead   no stim  yes
##    3   18   35    8
table(hs_t4h_expt$batches)
##
## lps-timecourse m-gm-csf mbio
##              8       39   17
hs_t4h_norm <- normalize_expt(hs_t4h_expt, norm="quant", convert="cpm",
transform="log2", filter=TRUE, batch="svaseq")## This function will replace the expt$expressionset slot with:
## log2(svaseq(cpm(quant(cbcb(data)))))
## It backs up the current data into a slot named:
## expt$backup_expressionset. It will also save copies of each step along the way
## in expt$normalized with the corresponding libsizes. Keep the libsizes in mind
## when invoking limma. The appropriate libsize is the non-log(cpm(normalized)).
## This is most likely kept at:
## 'new_expt$normalized$intermediate_counts$normalization$libsizes'
## A copy of this may also be found at:
## new_expt$best_libsize
## Warning in normalize_expt(hs_t4h_expt, norm = "quant", convert = "cpm", :
## Quantile normalization and sva do not always play well together.
## Step 1: performing count filter with option: cbcb
## Removing 7360 low-count genes (12269 remaining).
## Step 2: normalizing the data with quant.
## Using normalize.quantiles.robust due to a thread error in preprocessCore.
## Step 3: converting the data with cpm.
## Step 4: transforming the data with log2.
## transform_counts: Found 3822 values equal to 0, adding 1 to the matrix.
## Step 5: doing batch correction with svaseq.
## Note to self: If you get an error like 'x contains missing values' The data has too many 0's and needs a stronger low-count filter applied.
## Passing off to all_adjusters.
## batch_counts: Before batch/surrogate estimation, 711279 entries are x>1: 90.6%.
## batch_counts: Before batch/surrogate estimation, 3822 entries are x==0: 0.487%.
## batch_counts: Before batch/surrogate estimation, 70115 entries are 0<x<1: 8.93%.
## The be method chose 12 surrogate variable(s).
## Attempting svaseq estimation with 12 surrogates.
## There are 1760 (0.224%) elements which are < 0 after batch correction.
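To make the pipeline notation log2(svaseq(cpm(quant(cbcb(data))))) concrete, here is an approximate translation into the underlying Bioconductor calls. This is a sketch of the idea, not hpgltools' exact internals; 'counts' (a filtered count matrix) and 'condition' (the infection factor) are stand-ins:

library(edgeR)           # cpm()
library(preprocessCore)  # normalize.quantiles.robust()
library(sva)             # svaseq()
library(limma)           # removeBatchEffect()
quant <- normalize.quantiles.robust(as.matrix(counts))
cpms <- cpm(quant)
mod <- model.matrix(~ condition)                        # full model
mod0 <- model.matrix(~ 1, data=data.frame(condition))   # null model: intercept only
svs <- svaseq(cpms, mod, mod0)                # estimate surrogate variables
logged <- log2(cpms + 1)                      # the '+1' matches the message above
corrected <- removeBatchEffect(logged, covariates=svs[["sv"]], design=mod)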
## There were 64, now there are 29 samples.
hs_t4h_inf_norm <- normalize_expt(hs_t4h_inf, transform="log2", convert="cpm",
filter=TRUE, batch="svaseq")
## This function will replace the expt$expressionset slot with:
## log2(svaseq(cpm(cbcb(data))))
## It backs up the current data into a slot named:
## expt$backup_expressionset. It will also save copies of each step along the way
## in expt$normalized with the corresponding libsizes. Keep the libsizes in mind
## when invoking limma. The appropriate libsize is the non-log(cpm(normalized)).
## This is most likely kept at:
## 'new_expt$normalized$intermediate_counts$normalization$libsizes'
## A copy of this may also be found at:
## new_expt$best_libsize
## Leaving the data unnormalized. This is necessary for DESeq, but
## EdgeR/limma might benefit from normalization. Good choices include quantile,
## size-factor, tmm, etc.
## Step 1: performing count filter with option: cbcb
## Removing 7759 low-count genes (11870 remaining).
## Step 2: not normalizing the data.
## Step 3: converting the data with cpm.
## Step 4: transforming the data with log2.
## transform_counts: Found 3276 values equal to 0, adding 1 to the matrix.
## Step 5: doing batch correction with svaseq.
## Note to self: If you get an error like 'x contains missing values' The data has too many 0's and needs a stronger low-count filter applied.
## Passing off to all_adjusters.
## batch_counts: Before batch/surrogate estimation, 319013 entries are x>1: 92.7%.
## batch_counts: Before batch/surrogate estimation, 3276 entries are x==0: 0.952%.
## batch_counts: Before batch/surrogate estimation, 21941 entries are 0<x<1: 6.37%.
## The be method chose 6 surrogate variable(s).
## Attempting svaseq estimation with 6 surrogates.
## There are 557 (0.162%) elements which are < 0 after batch correction.
keepers <- list("infection" = c("yes", "no"))
hs_t4h_de <- all_pairwise(hs_t4h_inf, model_batch="svaseq", filter=TRUE, force=TRUE)
## batch_counts: Before batch/surrogate estimation, 339179 entries are x>1: 98.5%.
## batch_counts: Before batch/surrogate estimation, 3276 entries are x==0: 0.952%.
## batch_counts: Before batch/surrogate estimation, 216 entries are 0<x<1: 0.0627%.
## The be method chose 5 surrogate variable(s).
## Attempting svaseq estimation with 5 surrogates.
## This function will replace the expt$expressionset slot with:
## log2(cpm(quant(cbcb(data))))
## It backs up the current data into a slot named:
## expt$backup_expressionset. It will also save copies of each step along the way
## in expt$normalized with the corresponding libsizes. Keep the libsizes in mind
## when invoking limma. The appropriate libsize is the non-log(cpm(normalized)).
## This is most likely kept at:
## 'new_expt$normalized$intermediate_counts$normalization$libsizes'
## A copy of this may also be found at:
## new_expt$best_libsize
## Not correcting the count-data for batch effects. If batch is
## included in EdgerR/limma's model, then this is probably wise; but in extreme
## batch effects this is a good parameter to play with.
## Step 1: performing count filter with option: cbcb
## Removing 0 low-count genes (11870 remaining).
## Step 2: normalizing the data with quant.
## Using normalize.quantiles.robust due to a thread error in preprocessCore.
## Step 3: converting the data with cpm.
## Step 4: transforming the data with log2.
## transform_counts: Found 1964 values equal to 0, adding 1 to the matrix.
## Step 5: not doing batch correction.
## Plotting a PCA before surrogates/batch inclusion.
## Using svaseq to visualize before/after batch inclusion.
## Performing a test normalization with: raw
## This function will replace the expt$expressionset slot with:
## log2(svaseq(cpm(cbcb(data))))
## It backs up the current data into a slot named:
## expt$backup_expressionset. It will also save copies of each step along the way
## in expt$normalized with the corresponding libsizes. Keep the libsizes in mind
## when invoking limma. The appropriate libsize is the non-log(cpm(normalized)).
## This is most likely kept at:
## 'new_expt$normalized$intermediate_counts$normalization$libsizes'
## A copy of this may also be found at:
## new_expt$best_libsize
## Leaving the data unnormalized. This is necessary for DESeq, but
## EdgeR/limma might benefit from normalization. Good choices include quantile,
## size-factor, tmm, etc.
## Step 1: performing count filter with option: cbcb
## Removing 0 low-count genes (11870 remaining).
## Step 2: not normalizing the data.
## Step 3: converting the data with cpm.
## Step 4: transforming the data with log2.
## transform_counts: Found 3276 values equal to 0, adding 1 to the matrix.
## Step 5: doing batch correction with svaseq.
## Note to self: If you get an error like 'x contains missing values' The data has too many 0's and needs a stronger low-count filter applied.
## Passing off to all_adjusters.
## batch_counts: Before batch/surrogate estimation, 319013 entries are x>1: 92.7%.
## batch_counts: Before batch/surrogate estimation, 3276 entries are x==0: 0.952%.
## batch_counts: Before batch/surrogate estimation, 21941 entries are 0<x<1: 6.37%.
## The be method chose 6 surrogate variable(s).
## Attempting svaseq estimation with 6 surrogates.
## There are 557 (0.162%) elements which are < 0 after batch correction.
## Finished running DE analyses, collecting outputs.
## Comparing analyses.
hs_t4h_table <- combine_de_tables(hs_t4h_de, keepers=keepers)
## Writing a legend of columns.
## Working on 1/1: infection which is: yes/no.
## Found table with yes_vs_no
## 20181210 a pthread error in normalize.quantiles leads me to robust.
## Used Bon Ferroni corrected t test(s) between columns.
## Used Bon Ferroni corrected t test(s) between columns.
## Used Bon Ferroni corrected t test(s) between columns.
Now collect the same annotation data for the mouse samples and build the corresponding transcript-to-gene map.
## The biomart annotations file already exists, loading from it.
rownames(mm_annot) <- make.names(
paste0(mm_annot[["ensembl_transcript_id"]], ".",
mm_annot[["transcript_version"]]),
unique=TRUE)
mm_tx_gene <- mm_annot[, c("ensembl_gene_id", "ensembl_transcript_id")]
mm_tx_gene[["id"]] <- rownames(mm_tx_gene)
mm_tx_gene <- mm_tx_gene[, c("id", "ensembl_gene_id")]
new_mm_annot <- mm_annot
rownames(new_mm_annot) <- make.names(mm_annot[["ensembl_gene_id"]], unique=TRUE)

As before, I want to compare the uninfected samples against any samples which were infected for 4 hours, this time on the mouse side. So let us pull those samples and poke at them a bit.
mm_expt <- create_expt(sample_sheet,
file_column="mmusculusfile",
gene_info=new_mm_annot,
tx_gene_map=mm_tx_gene)
## Reading the sample metadata.
## The sample definitions comprises: 437 rows(samples) and 55 columns(metadata fields).
## Reading count tables.
## Using the transcript to gene mapping.
## Reading salmon data with tximport.
## Finished reading count tables.
## Matched 19660 annotations and counts.
## Bringing together the count matrix and gene information.
## The mapped IDs are not the rownames of your gene information, changing them now.
## Some annotations were lost in merging, setting them to 'undefined'.
## There were 105, now there are 41 samples.
table(mm_t4h_expt$conditions)
##
##   no stim  yes
##   11   24    6
table(mm_t4h_expt$batches)
##
## undefined
##        41
mm_t4h_norm <- normalize_expt(mm_t4h_expt, norm="quant", convert="cpm",
transform="log2", filter=TRUE, batch="svaseq")## This function will replace the expt$expressionset slot with:
## log2(svaseq(cpm(quant(cbcb(data)))))
## It backs up the current data into a slot named:
## expt$backup_expressionset. It will also save copies of each step along the way
## in expt$normalized with the corresponding libsizes. Keep the libsizes in mind
## when invoking limma. The appropriate libsize is the non-log(cpm(normalized)).
## This is most likely kept at:
## 'new_expt$normalized$intermediate_counts$normalization$libsizes'
## A copy of this may also be found at:
## new_expt$best_libsize
## Warning in normalize_expt(mm_t4h_expt, norm = "quant", convert = "cpm", :
## Quantile normalization and sva do not always play well together.
## Step 1: performing count filter with option: cbcb
## Removing 9350 low-count genes (10310 remaining).
## Step 2: normalizing the data with quant.
## Using normalize.quantiles.robust due to a thread error in preprocessCore.
## Step 3: converting the data with cpm.
## Step 4: transforming the data with log2.
## transform_counts: Found 38 values equal to 0, adding 1 to the matrix.
## Step 5: doing batch correction with svaseq.
## Note to self: If you get an error like 'x contains missing values' The data has too many 0's and needs a stronger low-count filter applied.
## Passing off to all_adjusters.
## batch_counts: Before batch/surrogate estimation, 390888 entries are x>1: 92.5%.
## batch_counts: Before batch/surrogate estimation, 38 entries are x==0: 0.00899%.
## batch_counts: Before batch/surrogate estimation, 31784 entries are 0<x<1: 7.52%.
## The be method chose 8 surrogate variable(s).
## Attempting svaseq estimation with 8 surrogates.
## There are 807 (0.191%) elements which are < 0 after batch correction.
mm_t4h_inf_norm <- normalize_expt(mm_t4h_expt, transform="log2", convert="cpm",
filter=TRUE, batch="svaseq")
## This function will replace the expt$expressionset slot with:
## log2(svaseq(cpm(cbcb(data))))
## It backs up the current data into a slot named:
## expt$backup_expressionset. It will also save copies of each step along the way
## in expt$normalized with the corresponding libsizes. Keep the libsizes in mind
## when invoking limma. The appropriate libsize is the non-log(cpm(normalized)).
## This is most likely kept at:
## 'new_expt$normalized$intermediate_counts$normalization$libsizes'
## A copy of this may also be found at:
## new_expt$best_libsize
## Leaving the data unnormalized. This is necessary for DESeq, but
## EdgeR/limma might benefit from normalization. Good choices include quantile,
## size-factor, tmm, etc.
## Step 1: performing count filter with option: cbcb
## Removing 9350 low-count genes (10310 remaining).
## Step 2: not normalizing the data.
## Step 3: converting the data with cpm.
## Step 4: transforming the data with log2.
## transform_counts: Found 3253 values equal to 0, adding 1 to the matrix.
## Step 5: doing batch correction with svaseq.
## Note to self: If you get an error like 'x contains missing values' The data has too many 0's and needs a stronger low-count filter applied.
## Passing off to all_adjusters.
## batch_counts: Before batch/surrogate estimation, 385615 entries are x>1: 91.2%.
## batch_counts: Before batch/surrogate estimation, 3253 entries are x==0: 0.770%.
## batch_counts: Before batch/surrogate estimation, 33842 entries are 0<x<1: 8.01%.
## The be method chose 7 surrogate variable(s).
## Attempting svaseq estimation with 7 surrogates.
## There are 1186 (0.281%) elements which are < 0 after batch correction.
mm_t4h_de <- all_pairwise(mm_t4h_expt, model_batch="svaseq", filter=TRUE, force=TRUE)
## batch_counts: Before batch/surrogate estimation, 417208 entries are x>1: 98.7%.
## batch_counts: Before batch/surrogate estimation, 3253 entries are x==0: 0.770%.
## batch_counts: Before batch/surrogate estimation, 392 entries are 0<x<1: 0.0927%.
## The be method chose 5 surrogate variable(s).
## Attempting svaseq estimation with 5 surrogates.
## This function will replace the expt$expressionset slot with:
## log2(cpm(quant(cbcb(data))))
## It backs up the current data into a slot named:
## expt$backup_expressionset. It will also save copies of each step along the way
## in expt$normalized with the corresponding libsizes. Keep the libsizes in mind
## when invoking limma. The appropriate libsize is the non-log(cpm(normalized)).
## This is most likely kept at:
## 'new_expt$normalized$intermediate_counts$normalization$libsizes'
## A copy of this may also be found at:
## new_expt$best_libsize
## Not correcting the count-data for batch effects. If batch is
## included in EdgerR/limma's model, then this is probably wise; but in extreme
## batch effects this is a good parameter to play with.
## Step 1: performing count filter with option: cbcb
## Removing 0 low-count genes (10310 remaining).
## Step 2: normalizing the data with quant.
## Using normalize.quantiles.robust due to a thread error in preprocessCore.
## Step 3: converting the data with cpm.
## Step 4: transforming the data with log2.
## transform_counts: Found 38 values equal to 0, adding 1 to the matrix.
## Step 5: not doing batch correction.
## Plotting a PCA before surrogates/batch inclusion.
## Using svaseq to visualize before/after batch inclusion.
## Performing a test normalization with: raw
## This function will replace the expt$expressionset slot with:
## log2(svaseq(cpm(cbcb(data))))
## It backs up the current data into a slot named:
## expt$backup_expressionset. It will also save copies of each step along the way
## in expt$normalized with the corresponding libsizes. Keep the libsizes in mind
## when invoking limma. The appropriate libsize is the non-log(cpm(normalized)).
## This is most likely kept at:
## 'new_expt$normalized$intermediate_counts$normalization$libsizes'
## A copy of this may also be found at:
## new_expt$best_libsize
## Leaving the data unnormalized. This is necessary for DESeq, but
## EdgeR/limma might benefit from normalization. Good choices include quantile,
## size-factor, tmm, etc.
## Step 1: performing count filter with option: cbcb
## Removing 0 low-count genes (10310 remaining).
## Step 2: not normalizing the data.
## Step 3: converting the data with cpm.
## Step 4: transforming the data with log2.
## transform_counts: Found 3253 values equal to 0, adding 1 to the matrix.
## Step 5: doing batch correction with svaseq.
## Note to self: If you get an error like 'x contains missing values' The data has too many 0's and needs a stronger low-count filter applied.
## Passing off to all_adjusters.
## batch_counts: Before batch/surrogate estimation, 385615 entries are x>1: 91.2%.
## batch_counts: Before batch/surrogate estimation, 3253 entries are x==0: 0.770%.
## batch_counts: Before batch/surrogate estimation, 33842 entries are 0<x<1: 8.01%.
## The be method chose 7 surrogate variable(s).
## Attempting svaseq estimation with 7 surrogates.
## There are 1186 (0.281%) elements which are < 0 after batch correction.
## Finished running DE analyses, collecting outputs.
## Comparing analyses.
mm_t4h_table <- combine_de_tables(mm_t4h_de, keepers=keepers)
## Writing a legend of columns.
## Working on 1/1: infection which is: yes/no.
## Found table with yes_vs_no
## 20181210 a pthread error in normalize.quantiles leads me to robust.
## Used Bon Ferroni corrected t test(s) between columns.
## Used Bon Ferroni corrected t test(s) between columns.
## Used Bon Ferroni corrected t test(s) between columns.
Let us see if our human differential expression result is similar to that obtained in Table S2.
previous_hs <- readxl::read_excel("excel/inline-supplementary-material-5.xls", sheet=2)
previous_hs_lfc <- previous_hs[, c("ID", "Fold change")]
neg_idx <- previous_hs_lfc[[2]] < 0
previous_hs_lfc[neg_idx, 2] <- -1 * (1 / previous_hs_lfc[neg_idx, 2])
previous_hs_lfc[[2]] <- log2(previous_hs_lfc[[2]])
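Those two lines undo the common "signed fold change" convention (a value of -2 meaning "half as much") before taking logs. A quick worked check of the sign handling:

fc <- c(4, -2)
neg <- fc < 0
fc[neg] <- -1 * (1 / fc[neg])  # -2 becomes 0.5
log2(fc)                       # yields 2 and -1, as expected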
merged <- merge(previous_hs_lfc, hs_t4h_table$data[[1]], by.x="ID", by.y="row.names")
cor.test(merged[["limma_logfc"]], merged[["Fold change"]])
##
## Pearson's product-moment correlation
##
## data: merged[["limma_logfc"]] and merged[["Fold change"]]
## t = 130, df = 5100, p-value <2e-16
## alternative hypothesis: true correlation is not equal to 0
## 95 percent confidence interval:
## 0.8645 0.8778
## sample estimates:
## cor
## 0.8713
previous_mm <- readxl::read_excel("excel/12864_2015_2237_MOESM3_ESM.xls", sheet=2, skip=1)
previous_mm_lfc <- previous_mm[, c("ID", "Fold change")]
neg_idx <- previous_mm_lfc[[2]] < 0
previous_mm_lfc[neg_idx, 2] <- -1 * (1 / previous_mm_lfc[neg_idx, 2])
previous_mm_lfc[[2]] <- log2(previous_mm_lfc[[2]])
merged <- merge(previous_mm_lfc, mm_t4h_table$data[[1]], by.x="ID", by.y="row.names")
cor.test(merged[["limma_logfc"]], merged[["Fold change"]])
##
## Pearson's product-moment correlation
##
## data: merged[["limma_logfc"]] and merged[["Fold change"]]
## t = 220, df = 5700, p-value <2e-16
## alternative hypothesis: true correlation is not equal to 0
## 95 percent confidence interval:
## 0.9445 0.9498
## sample estimates:
## cor
## 0.9472
First get the set of orthologs.
Side note: a different way of addressing this question resides in 20190220_host_comparisons.Rmd.
## The defaults of this function are suitable for mouse/human queries.
mm_hs_ortho <- load_biomart_orthologs()$all_linked_genes
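For reference, the equivalent direct biomaRt query looks roughly like the following; a sketch only, since load_biomart_orthologs() handles the defaults, and Ensembl attribute names can change between releases:

library(biomaRt)
# Hedged sketch: human genes paired with their mouse orthologs via
# Ensembl's homology attributes.
hs_mart <- useMart("ensembl", dataset="hsapiens_gene_ensembl")
orthos <- getBM(attributes=c("ensembl_gene_id", "mmusculus_homolog_ensembl_gene"),
                mart=hs_mart)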
mm_table <- mm_t4h_table$data[[1]]
hs_table <- hs_t4h_table$data[[1]]
mm_table <- merge(mm_hs_ortho, mm_table, by.x="mmusculus", by.y="row.names", all.y=TRUE)
hs_table <- merge(mm_hs_ortho, hs_table, by.x="hsapiens", by.y="row.names", all.y=TRUE)
both_table_hs <- merge(hs_table, mm_table, by.x="hsapiens", by.y="hsapiens")
both_table_mm <- merge(hs_table, mm_table, by.x="mmusculus", by.y="mmusculus")
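Note that the two merges yield tables of different sizes because mouse/human orthology is not one-to-one: a gene with several orthologs gets its row duplicated. A toy illustration of the effect:

# One human gene with two mouse orthologs duplicates rows on merge, which is
# why the two correlation tests below have different degrees of freedom.
ortho <- data.frame(hsapiens=c("hA", "hA", "hB"), mmusculus=c("m1", "m2", "m3"))
hs <- data.frame(hsapiens=c("hA", "hB"), lfc=c(1.5, -2.0))
merge(ortho, hs, by="hsapiens")  # 'hA' appears twice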
cor.test(both_table_hs[["limma_logfc.x"]], both_table_hs[["limma_logfc.y"]])
##
## Pearson's product-moment correlation
##
## data: both_table_hs[["limma_logfc.x"]] and both_table_hs[["limma_logfc.y"]]
## t = 34, df = 14000, p-value <2e-16
## alternative hypothesis: true correlation is not equal to 0
## 95 percent confidence interval:
## 0.2656 0.2964
## sample estimates:
## cor
## 0.2811
cor.test(both_table_mm[["limma_logfc.x"]], both_table_mm[["limma_logfc.y"]])
##
## Pearson's product-moment correlation
##
## data: both_table_mm[["limma_logfc.x"]] and both_table_mm[["limma_logfc.y"]]
## t = 34, df = 22000, p-value <2e-16
## alternative hypothesis: true correlation is not equal to 0
## 95 percent confidence interval:
## 0.2135 0.2387
## sample estimates:
## cor
## 0.2261
Table S7 has a set of genes from human which are also up-regulated in mouse upon infection.
fig_s7_up <- readxl::read_excel("excel/inline-supplementary-material-7.xls", sheet=3)
fig_s7_hs_up <- unique(fig_s7_up[[5]])
fig_s7_down <- readxl::read_excel("excel/inline-supplementary-material-7.xls", sheet=4)
fig_s7_hs_down <- unique(fig_s7_down[[5]])
both_up_idx <- both_table_hs[["limma_logfc.x"]] >= 0.8 &
both_table_hs[["limma_logfc.y"]] >= 0.8 &
both_table_hs[["limma_adjp.x"]] <= 0.1 &
both_table_hs[["limma_adjp.y"]] <= 0.1
both_up_ids <- both_table_hs[both_up_idx, "hsapiens"]
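Before drawing the Venn diagram, the overlap sizes can be cross-checked with base R set operations:

# How many of the previously reported up-regulated genes do we recover,
# and how many are unique to each set?
length(intersect(fig_s7_hs_up, both_up_ids))
length(setdiff(fig_s7_hs_up, both_up_ids))
length(setdiff(both_up_ids, fig_s7_hs_up))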
up_venn <- Vennerable::Venn(Sets=list("figs7" = fig_s7_hs_up, "tables" = both_up_ids))
Vennerable::plot(up_venn)

both_down_idx <- both_table_hs[["limma_logfc.x"]] <= -0.8 &
both_table_hs[["limma_logfc.y"]] <= -0.8 &
both_table_hs[["limma_adjp.x"]] <= 0.1 &
both_table_hs[["limma_adjp.y"]] <= 0.1
both_down_ids <- both_table_hs[both_down_idx, "hsapiens"]
down_venn <- Vennerable::Venn(Sets=list("figs7" = fig_s7_hs_down, "tables" = both_down_ids))
Vennerable::plot(down_venn)

All of the neutrophil data is in mouse, apparently. This will make it more difficult, perhaps impossible, to get an accurate answer.
So instead, look at infected vs. uninfected in mouse and then compare against the earliest of Sacks’ neutrophil timepoints.
subset <- "(hostcelltype=='PMN'&host=='mus_musculus'&expttime=='t12h') |
(hostcelltype=='macrophage'&host=='mus_musculus')"
neut_macr_mus <- subset_expt(mm_expt, subset=subset)
## There were 105, now there are 80 samples.
## There were 80, now there are 56 samples.
neut_macr_mus <- set_expt_conditions(neut_macr_mus, fact="infectstate")
neut_macr_mus <- set_expt_batches(neut_macr_mus, fact="hostcelltype")
neut_macr_mus_norm <- normalize_expt(neut_macr_mus, convert="cpm",
norm="quant", filter=TRUE)## This function will replace the expt$expressionset slot with:
## cpm(quant(cbcb(data)))
## It backs up the current data into a slot named:
## expt$backup_expressionset. It will also save copies of each step along the way
## in expt$normalized with the corresponding libsizes. Keep the libsizes in mind
## when invoking limma. The appropriate libsize is the non-log(cpm(normalized)).
## This is most likely kept at:
## 'new_expt$normalized$intermediate_counts$normalization$libsizes'
## A copy of this may also be found at:
## new_expt$best_libsize
## Leaving the data in its current base format, keep in mind that
## some metrics are easier to see when the data is log2 transformed, but
## EdgeR/DESeq do not accept transformed data.
## Not correcting the count-data for batch effects. If batch is
## included in EdgerR/limma's model, then this is probably wise; but in extreme
## batch effects this is a good parameter to play with.
## Step 1: performing count filter with option: cbcb
## Removing 0 low-count genes (19660 remaining).
## Step 2: normalizing the data with quant.
## Using normalize.quantiles.robust due to a thread error in preprocessCore.
## Step 3: converting the data with cpm.
## Step 4: not transforming the data.
## Step 5: not doing batch correction.
neut_macr_mus_normbatch <- normalize_expt(neut_macr_mus, convert="cpm",
norm="quant", filter=TRUE, batch="svaseq")## This function will replace the expt$expressionset slot with:
## svaseq(cpm(quant(cbcb(data))))
## It backs up the current data into a slot named:
## expt$backup_expressionset. It will also save copies of each step along the way
## in expt$normalized with the corresponding libsizes. Keep the libsizes in mind
## when invoking limma. The appropriate libsize is the non-log(cpm(normalized)).
## This is most likely kept at:
## 'new_expt$normalized$intermediate_counts$normalization$libsizes'
## A copy of this may also be found at:
## new_expt$best_libsize
## Leaving the data in its current base format, keep in mind that
## some metrics are easier to see when the data is log2 transformed, but
## EdgeR/DESeq do not accept transformed data.
## Warning in normalize_expt(neut_macr_mus, convert = "cpm", norm = "quant", :
## Quantile normalization and sva do not always play well together.
## Step 1: performing count filter with option: cbcb
## Removing 0 low-count genes (19660 remaining).
## Step 2: normalizing the data with quant.
## Using normalize.quantiles.robust due to a thread error in preprocessCore.
## Step 3: converting the data with cpm.
## Step 4: not transforming the data.
## Step 5: doing batch correction with svaseq.
## Note to self: If you get an error like 'x contains missing values' The data has too many 0's and needs a stronger low-count filter applied.
## Passing off to all_adjusters.
## batch_counts: Before batch/surrogate estimation, 515332 entries are x>1: 46.8%.
## batch_counts: Before batch/surrogate estimation, 323375 entries are x==0: 29.4%.
## batch_counts: Before batch/surrogate estimation, 262253 entries are 0<x<1: 23.8%.
## The be method chose 10 surrogate variable(s).
## Attempting svaseq estimation with 10 surrogates.
## There are 30709 (2.79%) elements which are < 0 after batch correction.
neut_macr_mus_filt <- normalize_expt(neut_macr_mus, filter="simple")
## This function will replace the expt$expressionset slot with:
## simple(data)
## It backs up the current data into a slot named:
## expt$backup_expressionset. It will also save copies of each step along the way
## in expt$normalized with the corresponding libsizes. Keep the libsizes in mind
## when invoking limma. The appropriate libsize is the non-log(cpm(normalized)).
## This is most likely kept at:
## 'new_expt$normalized$intermediate_counts$normalization$libsizes'
## A copy of this may also be found at:
## new_expt$best_libsize
## Leaving the data in its current base format, keep in mind that
## some metrics are easier to see when the data is log2 transformed, but
## EdgeR/DESeq do not accept transformed data.
## Leaving the data unconverted. It is often advisable to cpm/rpkm
## the data to normalize for sampling differences, keep in mind though that rpkm
## has some annoying biases, and voom() by default does a cpm (though hpgl_voom()
## will try to detect this).
## Leaving the data unnormalized. This is necessary for DESeq, but
## EdgeR/limma might benefit from normalization. Good choices include quantile,
## size-factor, tmm, etc.
## Not correcting the count-data for batch effects. If batch is
## included in EdgerR/limma's model, then this is probably wise; but in extreme
## batch effects this is a good parameter to play with.
## Step 1: performing count filter with option: simple
## Removing 3175 low-count genes (16485 remaining).
## Step 2: not normalizing the data.
## Step 3: not converting the data.
## Step 4: not transforming the data.
## Step 5: not doing batch correction.
neut_macr_mus_de <- all_pairwise(neut_macr_mus_filt, parallel=FALSE,
force=TRUE, model_batch="svaseq")
## batch_counts: Before batch/surrogate estimation, 616059 entries are x>1: 66.7%.
## batch_counts: Before batch/surrogate estimation, 269846 entries are x==0: 29.2%.
## batch_counts: Before batch/surrogate estimation, 2347 entries are 0<x<1: 0.254%.
## The be method chose 3 surrogate variable(s).
## Attempting svaseq estimation with 3 surrogates.
## This function will replace the expt$expressionset slot with:
## log2(cpm(quant(cbcb(data))))
## It backs up the current data into a slot named:
## expt$backup_expressionset. It will also save copies of each step along the way
## in expt$normalized with the corresponding libsizes. Keep the libsizes in mind
## when invoking limma. The appropriate libsize is the non-log(cpm(normalized)).
## This is most likely kept at:
## 'new_expt$normalized$intermediate_counts$normalization$libsizes'
## A copy of this may also be found at:
## new_expt$best_libsize
## Not correcting the count-data for batch effects. If batch is
## included in EdgerR/limma's model, then this is probably wise; but in extreme
## batch effects this is a good parameter to play with.
## Step 1: performing count filter with option: cbcb
## Removing 0 low-count genes (16485 remaining).
## Step 2: normalizing the data with quant.
## Using normalize.quantiles.robust due to a thread error in preprocessCore.
## Step 3: converting the data with cpm.
## Step 4: transforming the data with log2.
## transform_counts: Found 117451 values equal to 0, adding 1 to the matrix.
## Step 5: not doing batch correction.
## Plotting a PCA before surrogates/batch inclusion.
## Using svaseq to visualize before/after batch inclusion.
## Performing a test normalization with: raw
## This function will replace the expt$expressionset slot with:
## log2(svaseq(cpm(cbcb(data))))
## It backs up the current data into a slot named:
## expt$backup_expressionset. It will also save copies of each step along the way
## in expt$normalized with the corresponding libsizes. Keep the libsizes in mind
## when invoking limma. The appropriate libsize is the non-log(cpm(normalized)).
## This is most likely kept at:
## 'new_expt$normalized$intermediate_counts$normalization$libsizes'
## A copy of this may also be found at:
## new_expt$best_libsize
## Leaving the data unnormalized. This is necessary for DESeq, but
## EdgeR/limma might benefit from normalization. Good choices include quantile,
## size-factor, tmm, etc.
## Step 1: performing count filter with option: cbcb
## Removing 0 low-count genes (16485 remaining).
## Step 2: not normalizing the data.
## Step 3: converting the data with cpm.
## Step 4: transforming the data with log2.
## transform_counts: Found 269846 values equal to 0, adding 1 to the matrix.
## Step 5: doing batch correction with svaseq.
## Note to self: If you get an error like 'x contains missing values' The data has too many 0's and needs a stronger low-count filter applied.
## Passing off to all_adjusters.
## batch_counts: Before batch/surrogate estimation, 511083 entries are x>1: 55.4%.
## batch_counts: Before batch/surrogate estimation, 269846 entries are x==0: 29.2%.
## batch_counts: Before batch/surrogate estimation, 142231 entries are 0<x<1: 15.4%.
## The be method chose 7 surrogate variable(s).
## Attempting svaseq estimation with 7 surrogates.
## There are 61691 (6.68%) elements which are < 0 after batch correction.
## Starting basic_pairwise().
## Starting basic pairwise comparison.
## Leaving the data alone, regardless of normalization state.
## Basic step 0/3: Transforming data.
## Basic step 1/3: Creating median and variance tables.
## Basic step 2/3: Performing 3 comparisons.
## Basic step 3/3: Creating faux DE Tables.
## Basic: Returning tables.
## Starting deseq_pairwise().
## Starting DESeq2 pairwise comparisons.
## About to round the data, this is a pretty terrible thing to do. But if you, like me, want to see what happens when you put non-standard data into deseq, then here you go.
## Warning in choose_binom_dataset(input, force = force): This data was
## inappropriately forced into integers.
## Including batch estimates from sva/ruv/pca in the model.
## Choosing the non-intercept containing model.
## DESeq2 step 1/5: Including a matrix of batch estimates in the deseq model.
## converting counts to integer mode
## DESeq2 step 2/5: Estimate size factors.
## DESeq2 step 3/5: Estimate dispersions.
## gene-wise dispersion estimates
## mean-dispersion relationship
## final dispersion estimates
## Using a parametric fitting seems to have worked.
## DESeq2 step 4/5: nbinomWaldTest.
## Starting ebseq_pairwise().
## The data should be suitable for EdgeR/DESeq/EBSeq. If they freak out, check the state of the count table and ensure that it is in integer counts.
## Starting EBSeq pairwise subset.
## Choosing the non-intercept containing model.
## Starting EBTest of no vs. yes.
## Copying ppee values as ajusted p-values until I figure out how to deal with them.
## Starting edger_pairwise().
## Starting edgeR pairwise comparisons.
## About to round the data, this is a pretty terrible thing to do. But if you, like me, want to see what happens when you put non-standard data into deseq, then here you go.
## Warning in choose_binom_dataset(input, force = force): This data was
## inappropriately forced into integers.
## Including batch estimates from sva/ruv/pca in the model.
## Choosing the non-intercept containing model.
## EdgeR step 1/9: Importing and normalizing data.
## EdgeR step 2/9: Estimating the common dispersion.
## EdgeR step 3/9: Estimating dispersion across genes.
## EdgeR step 4/9: Estimating GLM Common dispersion.
## EdgeR step 5/9: Estimating GLM Trended dispersion.
## EdgeR step 6/9: Estimating GLM Tagged dispersion.
## EdgeR step 7/9: Running glmFit, switch to glmQLFit by changing the argument 'edger_test'.
## EdgeR step 8/9: Making pairwise contrasts.
## Starting limma_pairwise().
## Starting limma pairwise comparison.
## Leaving the data alone, regardless of normalization state.
## libsize was not specified, this parameter has profound effects on limma's result.
## Using the libsize from expt$best_libsize.
## Limma step 1/6: choosing model.
## Including batch estimates from sva/ruv/pca in the model.
## Choosing the non-intercept containing model.
## Limma step 2/6: running limma::voom(), switch with the argument 'which_voom'.
## Using normalize.method=quantile for voom.
## Limma step 3/6: running lmFit with method: ls.
## Limma step 4/6: making and fitting contrasts with no intercept. (~ 0 + factors)
## Limma step 5/6: Running eBayes with robust=FALSE and trend=FALSE.
## Limma step 6/6: Writing limma outputs.
## Limma step 6/6: 1/1: Creating table: yes_vs_no. Adjust=BH
## Limma step 6/6: 1/2: Creating table: no. Adjust=BH
## Limma step 6/6: 2/2: Creating table: yes. Adjust=BH
## Comparing analyses.
neut_macr_mus_table <- combine_de_tables(neut_macr_mus_de, keepers=keepers)
## Writing a legend of columns.
## Working on 1/1: infection which is: yes/no.
## Found table with yes_vs_no
## 20181210 a pthread error in normalize.quantiles leads me to robust.
## Used Bon Ferroni corrected t test(s) between columns.
## Used Bon Ferroni corrected t test(s) between columns.
## Used Bon Ferroni corrected t test(s) between columns.
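For reference, here is a minimal limma/voom sketch of the yes-vs-no contrast that all_pairwise() automated above; 'counts' (a filtered count matrix) and 'condition' (a factor with levels no/yes) are stand-ins, and the options mirror the log: quantile normalization inside voom, a no-intercept model, and BH adjustment:

library(limma)
library(edgeR)
design <- model.matrix(~ 0 + condition)  # no-intercept model, as in the log above
colnames(design) <- levels(condition)
v <- voom(DGEList(counts), design, normalize.method="quantile")
fit <- lmFit(v, design)
contrasts <- makeContrasts(yes_vs_no = yes - no, levels=design)
fit2 <- eBayes(contrasts.fit(fit, contrasts))
yes_vs_no_table <- topTable(fit2, coef="yes_vs_no", number=Inf, adjust.method="BH")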
R version 3.5.3 (2019-03-11)
Platform: x86_64-pc-linux-gnu (64-bit)
locale: LC_CTYPE=en_US.utf8, LC_NUMERIC=C, LC_TIME=en_US.utf8, LC_COLLATE=en_US.utf8, LC_MONETARY=en_US.utf8, LC_MESSAGES=en_US.utf8, LC_PAPER=en_US.utf8, LC_NAME=C, LC_ADDRESS=C, LC_TELEPHONE=C, LC_MEASUREMENT=en_US.utf8 and LC_IDENTIFICATION=C
attached base packages: parallel, stats, graphics, grDevices, utils, datasets, methods and base
other attached packages: edgeR(v.3.24.3), ruv(v.0.9.7), hpgltools(v.1.0), Biobase(v.2.42.0) and BiocGenerics(v.0.28.0)
loaded via a namespace (and not attached): tidyselect(v.0.2.5), lme4(v.1.1-21), htmlwidgets(v.1.3), RSQLite(v.2.1.1), AnnotationDbi(v.1.44.0), grid(v.3.5.3), BiocParallel(v.1.16.6), Rtsne(v.0.15), devtools(v.2.0.1), munsell(v.0.5.0), codetools(v.0.2-16), preprocessCore(v.1.45.0), withr(v.2.1.2), colorspace(v.1.4-1), GOSemSim(v.2.8.0), knitr(v.1.22), rstudioapi(v.0.10), stats4(v.3.5.3), Vennerable(v.3.1.0.9000), robustbase(v.0.93-4), DOSE(v.3.8.2), labeling(v.0.3), urltools(v.1.7.2), tximport(v.1.10.1), GenomeInfoDbData(v.1.2.0), polyclip(v.1.10-0), bit64(v.0.9-7), farver(v.1.1.0), rprojroot(v.1.3-2), xfun(v.0.6), R6(v.2.4.0), doParallel(v.1.0.14), GenomeInfoDb(v.1.18.2), locfit(v.1.5-9.1), bitops(v.1.0-6), fgsea(v.1.8.0), gridGraphics(v.0.3-0), DelayedArray(v.0.8.0), assertthat(v.0.2.1), scales(v.1.0.0), ggraph(v.1.0.2), nnet(v.7.3-12), enrichplot(v.1.2.0), gtable(v.0.3.0), sva(v.3.30.1), processx(v.3.3.0), rlang(v.0.3.3), genefilter(v.1.64.0), splines(v.3.5.3), rtracklayer(v.1.42.2), lazyeval(v.0.2.2), acepack(v.1.4.1), checkmate(v.1.9.1), europepmc(v.0.3), yaml(v.2.2.0), reshape2(v.1.4.3), GenomicFeatures(v.1.34.7), backports(v.1.1.3), qvalue(v.2.14.1), Hmisc(v.4.2-0), RBGL(v.1.58.2), clusterProfiler(v.3.10.1), tools(v.3.5.3), usethis(v.1.4.0), ggplotify(v.0.0.3), ggplot2(v.3.1.0), gplots(v.3.0.1.1), RColorBrewer(v.1.1-2), blockmodeling(v.0.3.4), sessioninfo(v.1.1.1), ggridges(v.0.5.1), Rcpp(v.1.0.1), plyr(v.1.8.4), base64enc(v.0.1-3), progress(v.1.2.0), zlibbioc(v.1.28.0), purrr(v.0.3.2), RCurl(v.1.95-4.12), ps(v.1.3.0), prettyunits(v.1.0.2), rpart(v.4.1-13), viridis(v.0.5.1), cowplot(v.0.9.4), S4Vectors(v.0.20.1), SummarizedExperiment(v.1.12.0), ggrepel(v.0.8.0), cluster(v.2.0.8), colorRamps(v.2.3), fs(v.1.2.7), variancePartition(v.1.12.3), magrittr(v.1.5), data.table(v.1.12.0), DO.db(v.2.9), openxlsx(v.4.1.0), triebeard(v.0.3.0), packrat(v.0.5.0), matrixStats(v.0.54.0), pkgload(v.1.0.2), hms(v.0.4.2), evaluate(v.0.13), xtable(v.1.8-3), pbkrtest(v.0.4-7), XML(v.3.98-1.19), readxl(v.1.3.1), IRanges(v.2.16.0), gridExtra(v.2.3), testthat(v.2.0.1), compiler(v.3.5.3), biomaRt(v.2.38.0), tibble(v.2.1.1), KernSmooth(v.2.23-15), crayon(v.1.3.4), minqa(v.1.2.4), htmltools(v.0.3.6), mgcv(v.1.8-28), corpcor(v.1.6.9), Formula(v.1.2-3), geneplotter(v.1.60.0), tidyr(v.0.8.3), DBI(v.1.0.0), tweenr(v.1.0.1), MASS(v.7.3-51.3), boot(v.1.3-20), Matrix(v.1.2-17), readr(v.1.3.1), cli(v.1.1.0), quadprog(v.1.5-5), gdata(v.2.18.0), igraph(v.1.2.4), GenomicRanges(v.1.34.0), pkgconfig(v.2.0.2), registry(v.0.5-1), rvcheck(v.0.1.3), GenomicAlignments(v.1.18.1), foreign(v.0.8-71), xml2(v.1.2.0), foreach(v.1.4.4), annotate(v.1.60.1), rngtools(v.1.3.1), pkgmaker(v.0.27), XVector(v.0.22.0), bibtex(v.0.4.2), doRNG(v.1.7.1), EBSeq(v.1.22.1), stringr(v.1.4.0), callr(v.3.2.0), digest(v.0.6.18), graph(v.1.60.0), Biostrings(v.2.50.2), cellranger(v.1.1.0), rmarkdown(v.1.12), fastmatch(v.1.1-0), htmlTable(v.1.13.1), directlabels(v.2018.05.22), curl(v.3.3), Rsamtools(v.1.34.1), gtools(v.3.8.1), nloptr(v.1.2.1), nlme(v.3.1-137), jsonlite(v.1.6), desc(v.1.2.0), viridisLite(v.0.3.0), limma(v.3.38.3), pillar(v.1.3.1), lattice(v.0.20-38), DEoptimR(v.1.0-8), httr(v.1.4.0), pkgbuild(v.1.0.3), survival(v.2.44-1.1), GO.db(v.3.7.0), glue(v.1.3.1), remotes(v.2.0.2), zip(v.2.0.1), UpSetR(v.1.3.3), iterators(v.1.0.10), pander(v.0.6.3), bit(v.1.1-14), ggforce(v.0.2.1), stringi(v.1.4.3), blob(v.1.1.1), DESeq2(v.1.22.2), latticeExtra(v.0.6-28), caTools(v.1.17.1.2), memoise(v.1.1.0) and dplyr(v.0.8.0.1)
## If you wish to reproduce this exact build of hpgltools, invoke the following:
## > git clone http://github.com/abelew/hpgltools.git
## > git reset dcf2a1659165dacca91ade9f3f8e56f234633e55
## This is hpgltools commit: Thu Apr 4 14:58:27 2019 -0400: dcf2a1659165dacca91ade9f3f8e56f234633e55