I am using my slightly modified copy of SWATH2stats. This seeks to ensure that changes in the case of columns in the metadata from one version of OpenMS to another do not trouble me.
There is one important caveat in the following block: I used a regex to remove the second half of geneID_geneName so that later when I merge in the annotation data I have it will match.
In response to some interesting queries from Yan, I made a few little functions which query and plot data from the scored data provided by openswath/pyprophet. Let us look at their results here.
## Sample sheet for this run; 'ver' is defined earlier in the document.
pyp_metadata <- glue::glue("sample_sheets/Mtb_dia_samples_{ver}.xlsx")
## Read the pyprophet-scored tsv files listed in the 'diascored' column of the
## sample sheet, one file per sample (see the per-sample messages below).
pyprophet_fun <- extract_pyprophet_data(metadata=pyp_metadata,
pyprophet_column="diascored")
## Attempting to read the tsv file for: 2019_0709Briken01: preprocessing/08pyprophet/20190718/whole_8mz_tuberculist/2019_0709Briken01_vs_20190718_whole_HCD_dia_scored.tsv.
## Attempting to read the tsv file for: 2019_0709Briken02: preprocessing/08pyprophet/20190718/whole_8mz_tuberculist/2019_0709Briken02_vs_20190718_whole_HCD_dia_scored.tsv.
## Attempting to read the tsv file for: 2019_0709Briken03: preprocessing/08pyprophet/20190718/whole_8mz_tuberculist/2019_0709Briken03_vs_20190718_whole_HCD_dia_scored.tsv.
## Attempting to read the tsv file for: 2019_0709Briken04: preprocessing/08pyprophet/20190718/whole_8mz_tuberculist/2019_0709Briken04_vs_20190718_whole_HCD_dia_scored.tsv.
## Attempting to read the tsv file for: 2019_0709Briken05: preprocessing/08pyprophet/20190718/whole_8mz_tuberculist/2019_0709Briken05_vs_20190718_whole_HCD_dia_scored.tsv.
## Attempting to read the tsv file for: 2019_0709Briken06: preprocessing/08pyprophet/20190718/whole_8mz_tuberculist/2019_0709Briken06_vs_20190718_whole_HCD_dia_scored.tsv.
## Attempting to read the tsv file for: 2019_0709Briken07: preprocessing/08pyprophet/20190718/whole_8mz_tuberculist/2019_0709Briken07_vs_20190718_whole_HCD_dia_scored.tsv.
## Attempting to read the tsv file for: 2019_0709Briken08: preprocessing/08pyprophet/20190718/whole_8mz_tuberculist/2019_0709Briken08_vs_20190718_whole_HCD_dia_scored.tsv.
## Attempting to read the tsv file for: 2019_0709Briken09: preprocessing/08pyprophet/20190718/whole_8mz_tuberculist/2019_0709Briken09_vs_20190718_whole_HCD_dia_scored.tsv.
## Attempting to read the tsv file for: 2019_0709Briken10: preprocessing/08pyprophet/20190718/whole_8mz_tuberculist/2019_0709Briken10_vs_20190718_whole_HCD_dia_scored.tsv.
## Attempting to read the tsv file for: 2019_0709Briken11: preprocessing/08pyprophet/20190718/whole_8mz_tuberculist/2019_0709Briken11_vs_20190718_whole_HCD_dia_scored.tsv.
## Attempting to read the tsv file for: 2019_0709Briken12: preprocessing/08pyprophet/20190718/whole_8mz_tuberculist/2019_0709Briken12_vs_20190718_whole_HCD_dia_scored.tsv.
## Attempting to read the tsv file for: 2019_0709Briken13: preprocessing/08pyprophet/20190718/whole_8mz_tuberculist/2019_0709Briken13_vs_20190718_whole_HCD_dia_scored.tsv.
## Attempting to read the tsv file for: 2019_0709Briken14: preprocessing/08pyprophet/20190718/whole_8mz_tuberculist/2019_0709Briken14_vs_20190718_whole_HCD_dia_scored.tsv.
## Attempting to read the tsv file for: 2019_0709Briken15: preprocessing/08pyprophet/20190718/whole_8mz_tuberculist/2019_0709Briken15_vs_20190718_whole_HCD_dia_scored.tsv.
## Attempting to read the tsv file for: 2019_0709Briken16: preprocessing/08pyprophet/20190718/whole_8mz_tuberculist/2019_0709Briken16_vs_20190718_whole_HCD_dia_scored.tsv.
## Attempting to read the tsv file for: 2019_0709Briken17: preprocessing/08pyprophet/20190718/whole_8mz_tuberculist/2019_0709Briken17_vs_20190718_whole_HCD_dia_scored.tsv.
## Attempting to read the tsv file for: 2019_0709Briken18: preprocessing/08pyprophet/20190718/whole_8mz_tuberculist/2019_0709Briken18_vs_20190718_whole_HCD_dia_scored.tsv.
## Visualize the mass distributions of each sample
mass_plot <- plot_pyprophet_distribution(pyprophet_fun, column="mass")
## Adding 2019_0709Briken01
## Adding 2019_0709Briken02
## Adding 2019_0709Briken03
## Adding 2019_0709Briken04
## Adding 2019_0709Briken05
## Adding 2019_0709Briken06
## Adding 2019_0709Briken07
## Adding 2019_0709Briken08
## Adding 2019_0709Briken09
## Adding 2019_0709Briken10
## Adding 2019_0709Briken11
## Adding 2019_0709Briken12
## Adding 2019_0709Briken13
## Adding 2019_0709Briken14
## Adding 2019_0709Briken15
## Adding 2019_0709Briken16
## Adding 2019_0709Briken17
## Adding 2019_0709Briken18
## Writing the image to: images/masses_observed.png and calling dev.off().
## That second to last sample looks pretty odd.
## Look at the delta rt times observed. I am continually puzzled as to why these numbers
## get so high. I suspect the actual reason is that I do not understand this column in the
## data, but finding reliable documentation is non-trivial, I just blew 45 minutes (re)reading
## a couple of reviews and some papers in which I thought I saw relevant information
## and found nothing.
## Distribution of delta_rt across all samples, decoys included.  Wrapping in
## sm() suppresses the per-sample 'Adding ...' messages seen in earlier calls.
deltart_plot_all <- sm(plot_pyprophet_distribution(
pyprophet_fun, column="delta_rt"))
pp(file="images/drt_observed.png", image=deltart_plot_all[["violin"]])
## Writing the image to: images/drt_observed.png and calling dev.off().
## This plot is the same as above, but includes _only_ the non-decoy values.
deltart_plot_real <- sm(plot_pyprophet_distribution(
pyprophet_fun,
column="delta_rt", keep_decoys=FALSE))
deltart_plot_real[["violin"]]
## And this time we have _only_ the decoys. I am not really sure what one should expect in
## these, but the differences are definitely intriguing.
deltart_plot_decoys <- sm(plot_pyprophet_distribution(
pyprophet_fun,
column="delta_rt", keep_real=FALSE))
deltart_plot_decoys[["violin"]]
## How many identifications were observed in each sample?
## Count non-decoy identifications per sample.
pyprophet_identifications <- sm(plot_pyprophet_counts(
pyprophet_fun, keep_decoys=FALSE,
type="count"))
pp(file="images/num_identifications.png", image=pyprophet_identifications$plot)
## Writing the image to: images/num_identifications.png and calling dev.off().
## The range in values is a little surprising to me, but I do not
## think it is crazytown.
## Sum(intensity) vs. sum(identifications). We saw in a previous plot that sample
## 17 was odd, so I would sort of expect it to be far away on this plot?
## Scatter of per-sample identification count against summed intensity.
pyprophet_xy <- plot_pyprophet_xy(
pyprophet_fun,
x_type="count", y_type="intensity")
## Adding 2019_0709Briken01
## Adding 2019_0709Briken02
## Adding 2019_0709Briken03
## Adding 2019_0709Briken04
## Adding 2019_0709Briken05
## Adding 2019_0709Briken06
## Adding 2019_0709Briken07
## Adding 2019_0709Briken08
## Adding 2019_0709Briken09
## Adding 2019_0709Briken10
## Adding 2019_0709Briken11
## Adding 2019_0709Briken12
## Adding 2019_0709Briken13
## Adding 2019_0709Briken14
## Adding 2019_0709Briken15
## Adding 2019_0709Briken16
## Adding 2019_0709Briken17
## Adding 2019_0709Briken18
## Adding 2019_0709Briken01
## Adding 2019_0709Briken02
## Adding 2019_0709Briken03
## Adding 2019_0709Briken04
## Adding 2019_0709Briken05
## Adding 2019_0709Briken06
## Adding 2019_0709Briken07
## Adding 2019_0709Briken08
## Adding 2019_0709Briken09
## Adding 2019_0709Briken10
## Adding 2019_0709Briken11
## Adding 2019_0709Briken12
## Adding 2019_0709Briken13
## Adding 2019_0709Briken14
## Adding 2019_0709Briken15
## Adding 2019_0709Briken16
## Adding 2019_0709Briken17
## Adding 2019_0709Briken18
## Writing the image to: images/counts_vs_intensities.png and calling dev.off().
## hmm I do not see a strong trend
## This has so far been a pretty reliable plot to show that the observed peak widths are
## very consistent across samples.
## Per-sample peak left-width vs. identification count.
pyprophet_lwidths <- sm(plot_pyprophet_xy(
pyprophet_fun,
x_type="count", y_type="leftwidth"))
pp(file="images/lwidths_vs_counts.png", image=pyprophet_lwidths)
## Writing the image to: images/lwidths_vs_counts.png and calling dev.off().
There are a few proteins for which Volker has relatively specific assumptions/expectations. Let us see what they look like and if they follow a trend which makes some sense…
The primary thing to recall, I think, is that in our previous data sets, there were a pretty large number of samples for which no identifications were made for many of these proteins. Does that remain true?
## Per-sample intensity distribution for esxG (Rv0287), log scale.
intensities_esxG <- sm(plot_pyprophet_protein(pyprophet_fun, scale="log",
title="esxG Intensities",
column="intensity", protein="Rv0287"))
pp(file=paste0("images/osw_esxG_intensities-v", ver, ".png"), image=intensities_esxG)
## Writing the image to: images/osw_esxG_intensities-v20190718.png and calling dev.off().
## Is this a good or terrible spread of observed intensities for proteomics data?
## If I found a range like this in RNASeq data, I would just throw it away out of hand.
## Sample 17 did not have any observations, but everything else did.
## The range of dRT values. I wish this metric made sense to me!
## min_data/max_data bound how many observations a sample may contribute.
## NOTE(review): that reading is inferred from the argument names -- confirm
## against plot_pyprophet_protein()'s definition.
drt_esxG <- plot_pyprophet_protein(pyprophet_fun,
column="delta_rt", protein="Rv0287",
min_data=100, max_data=1000)
## Adding 2019_0709Briken01
## Adding 2019_0709Briken02
## Adding 2019_0709Briken03
## Adding 2019_0709Briken04
## Adding 2019_0709Briken05
## Adding 2019_0709Briken06
## Adding 2019_0709Briken07
## Adding 2019_0709Briken08
## Adding 2019_0709Briken09
## Adding 2019_0709Briken10
## Adding 2019_0709Briken11
## Adding 2019_0709Briken12
## Adding 2019_0709Briken13
## Adding 2019_0709Briken14
## Adding 2019_0709Briken15
## Adding 2019_0709Briken16
## Adding 2019_0709Briken17
## Adding 2019_0709Briken18
intensities_esxH <- plot_pyprophet_protein(pyprophet_fun,
title="esxH Intensities",
scale="log", column="intensity", protein="Rv0288")
## Adding 2019_0709Briken01
## Adding 2019_0709Briken02
## Adding 2019_0709Briken03
## Adding 2019_0709Briken04
## Adding 2019_0709Briken05
## Adding 2019_0709Briken06
## Adding 2019_0709Briken07
## Adding 2019_0709Briken08
## Adding 2019_0709Briken09
## Adding 2019_0709Briken10
## Adding 2019_0709Briken11
## Adding 2019_0709Briken12
## Adding 2019_0709Briken13
## Adding 2019_0709Briken14
## Adding 2019_0709Briken15
## Adding 2019_0709Briken16
## Adding 2019_0709Briken17
## Adding 2019_0709Briken18
## Writing the image to: images/osw_esxH_intensities-v20190718.png and calling dev.off().
## All samples have observations, but a bit sparser.
intensities_lpqH <- plot_pyprophet_protein(pyprophet_fun, scale="log",
title="lpqH_intensities", column="intensity", protein="Rv3763")
## Adding 2019_0709Briken01
## Adding 2019_0709Briken02
## Adding 2019_0709Briken03
## Adding 2019_0709Briken04
## Adding 2019_0709Briken05
## Adding 2019_0709Briken06
## Adding 2019_0709Briken07
## Adding 2019_0709Briken08
## Adding 2019_0709Briken09
## Adding 2019_0709Briken10
## Adding 2019_0709Briken11
## Adding 2019_0709Briken12
## Adding 2019_0709Briken13
## Adding 2019_0709Briken14
## Adding 2019_0709Briken15
## Adding 2019_0709Briken16
## Adding 2019_0709Briken17
## Adding 2019_0709Briken18
## Writing the image to: images/osw_lpqh_intensities-v20190718.png and calling dev.off().
## Very few observations, but they are relatively consistent?
intensities_groel1 <- plot_pyprophet_protein(pyprophet_fun,
title="groEL1 intensities", scale="log",
column="intensity", protein="Rv3417")
## Adding 2019_0709Briken01
## Adding 2019_0709Briken02
## Adding 2019_0709Briken03
## Adding 2019_0709Briken04
## Adding 2019_0709Briken05
## Adding 2019_0709Briken06
## Adding 2019_0709Briken07
## Adding 2019_0709Briken08
## Adding 2019_0709Briken09
## Adding 2019_0709Briken10
## Adding 2019_0709Briken11
## Adding 2019_0709Briken12
## Adding 2019_0709Briken13
## Adding 2019_0709Briken14
## Adding 2019_0709Briken15
## Adding 2019_0709Briken16
## Adding 2019_0709Briken17
## Adding 2019_0709Briken18
## Writing the image to: images/osw_groel1_intensities-v20190718.png and calling dev.off().
intensities_groel2 <- plot_pyprophet_protein(pyprophet_fun,
title="groEL2 intensities", scale="log",
column="intensity", protein="Rv0440")
## Adding 2019_0709Briken01
## Adding 2019_0709Briken02
## Adding 2019_0709Briken03
## Adding 2019_0709Briken04
## Adding 2019_0709Briken05
## Adding 2019_0709Briken06
## Adding 2019_0709Briken07
## Adding 2019_0709Briken08
## Adding 2019_0709Briken09
## Adding 2019_0709Briken10
## Adding 2019_0709Briken11
## Adding 2019_0709Briken12
## Adding 2019_0709Briken13
## Adding 2019_0709Briken14
## Adding 2019_0709Briken15
## Adding 2019_0709Briken16
## Adding 2019_0709Briken17
## Adding 2019_0709Briken18
## Writing the image to: images/osw_groel2_intensities-v20190718.png and calling dev.off().
## Man I am loving the density of observations, but I wish they were more consistent!
## Also, sample 17 is sparser.
intensities_fap <- plot_pyprophet_protein(pyprophet_fun,
title="fap intensities", scale="log",
column="intensity", protein="Rv1860")
## Adding 2019_0709Briken01
## Adding 2019_0709Briken02
## Adding 2019_0709Briken03
## Adding 2019_0709Briken04
## Adding 2019_0709Briken05
## Adding 2019_0709Briken06
## Adding 2019_0709Briken07
## Adding 2019_0709Briken08
## Adding 2019_0709Briken09
## Adding 2019_0709Briken10
## Adding 2019_0709Briken11
## Adding 2019_0709Briken12
## Adding 2019_0709Briken13
## Adding 2019_0709Briken14
## Adding 2019_0709Briken15
## Adding 2019_0709Briken16
## Adding 2019_0709Briken17
## Adding 2019_0709Briken18
## Writing the image to: images/osw_fap_intensities-v20190718.png and calling dev.off().
## Sample 17 is basically a null.
intensities_katg <- plot_pyprophet_protein(pyprophet_fun,
title="katG intensities", scale="log",
column="intensity", protein="Rv1908")
## Adding 2019_0709Briken01
## Adding 2019_0709Briken02
## Adding 2019_0709Briken03
## Adding 2019_0709Briken04
## Adding 2019_0709Briken05
## Adding 2019_0709Briken06
## Adding 2019_0709Briken07
## Adding 2019_0709Briken08
## Adding 2019_0709Briken09
## Adding 2019_0709Briken10
## Adding 2019_0709Briken11
## Adding 2019_0709Briken12
## Adding 2019_0709Briken13
## Adding 2019_0709Briken14
## Adding 2019_0709Briken15
## Adding 2019_0709Briken16
## Adding 2019_0709Briken17
## Adding 2019_0709Briken18
## Writing the image to: images/osw_katg_intensities.png and calling dev.off().
I want to load the data and metadata into SWATH2stats in preparation for MSstats and my own hpgltools-base analyses.
## Path to the tric-aligned transition matrix for this run ('ver' set earlier).
tric_file <- file.path("preprocessing", "09tric", ver, "whole_8mz_tuberculist", "comet_HCD.tsv")
tric_data <- readr::read_tsv(tric_file)
## Parsed with column specification:
## cols(
## .default = col_double(),
## run_id = col_character(),
## filename = col_character(),
## Sequence = col_character(),
## FullPeptideName = col_character(),
## aggr_Peak_Area = col_logical(),
## aggr_Peak_Apex = col_logical(),
## aggr_Fragment_Annotation = col_logical(),
## ProteinName = col_character(),
## align_runid = col_character(),
## align_origfilename = col_character()
## )
## See spec(...) for full column specifications.
## Strip the trailing _geneName half of the geneID_geneName ProteinName
## entries so they match the annotation data merged in later.  The greedy
## (.*) means only the text after the LAST underscore is removed.
tric_data[["ProteinName"]] <- gsub(pattern="^(.*)_.*$", replacement="\\1",
x=tric_data[["ProteinName"]])
sample_sheet <- file.path("sample_sheets", glue::glue("Mtb_dia_samples_{ver}.xlsx"))
sample_annot <- extract_metadata(sample_sheet)
## Drop any rows whose rownames begin with the literal 's..'.
## NOTE(review): presumably these are sanitized artifacts produced by
## extract_metadata() -- confirm against its output.
kept <- ! grepl(x=rownames(sample_annot), pattern="^s\\.\\.")
sample_annot <- sample_annot[kept, ]
## Load the locally modified SWATH2stats (case-insensitive column handling,
## per the discussion above).
devtools::load_all("~/scratch/git/SWATH2stats_myforked")
## Loading SWATH2stats
## Merge the sample annotations into the tric transition data.
s2s_exp <- sample_annotation(data=tric_data, verbose=TRUE,
sample_annotation=sample_annot,
fullpeptidename_column="fullpeptidename")
## Found the same mzXML files in the annotations and data.
## preprocessing/01mzXML/dia/20190718/2019_0709Briken01.mzXML
## preprocessing/01mzXML/dia/20190718/2019_0709Briken02.mzXML
## preprocessing/01mzXML/dia/20190718/2019_0709Briken03.mzXML
## preprocessing/01mzXML/dia/20190718/2019_0709Briken04.mzXML
## preprocessing/01mzXML/dia/20190718/2019_0709Briken05.mzXML
## preprocessing/01mzXML/dia/20190718/2019_0709Briken06.mzXML
## preprocessing/01mzXML/dia/20190718/2019_0709Briken07.mzXML
## preprocessing/01mzXML/dia/20190718/2019_0709Briken08.mzXML
## preprocessing/01mzXML/dia/20190718/2019_0709Briken09.mzXML
## preprocessing/01mzXML/dia/20190718/2019_0709Briken10.mzXML
## preprocessing/01mzXML/dia/20190718/2019_0709Briken11.mzXML
## preprocessing/01mzXML/dia/20190718/2019_0709Briken12.mzXML
## preprocessing/01mzXML/dia/20190718/2019_0709Briken13.mzXML
## preprocessing/01mzXML/dia/20190718/2019_0709Briken14.mzXML
## preprocessing/01mzXML/dia/20190718/2019_0709Briken15.mzXML
## preprocessing/01mzXML/dia/20190718/2019_0709Briken16.mzXML
## preprocessing/01mzXML/dia/20190718/2019_0709Briken17.mzXML
## preprocessing/01mzXML/dia/20190718/2019_0709Briken18.mzXML
## 18 samples were read from the annotations.
## 82154 transitions were read from the data and merged with the annotations.
Now I have a couple data structures which should prove useful for the metrics provided by SWATH2stats, MSstats, and my own hpgltools.
The various metrics and filters provided by SWATH2stats seem quite reasonable to me. The only thing that really bothers me is that they are all case sensitive and I found that the most recent tric changed the capitalization of a column, causing these to all fall down. Therefore I went in and made everything case insensitive in a fashion similar to that done by MSstats (except I hate capital letters, so I used tolower() rather than toupper()).
The following block performs the metrics and filters suggested by swath2stats. These first define the decoy hit rate in the data, then filter the data based on that. It also filters out hits with less than ideal m-scores and proteins with non-optimal distributions of peptide hits (either due to too few peptides or a weird distribution of intensities).
## Get correlations on a sample by sample basis
pp(file=glue::glue("images/s2s_correlation-v{ver}.png"))
## Going to write the image to: images/s2s_correlation-v20190718.png when dev.off() is called.
## Cast intensities to one value per transition group per
## condition/bioreplicate/run (aggregating by mean) and plot the
## between-sample correlations.
sample_cond_rep_cor <- plot_correlation_between_samples(
s2s_exp, size=2,
comparison=transition_group_id ~
condition + bioreplicate + run,
fun.aggregate=mean,
column.values="intensity")
dev.off()
## png
## 2
## Do the same thing, but use the sum of the intensities peptide/protein
## instead of the mean...
## NOTE(review): this reuses the same variable name and so clobbers the
## mean-aggregated result above -- rename if both are wanted later.
sample_cond_rep_cor <- plot_correlation_between_samples(
s2s_exp, size=2,
comparison=transition_group_id ~
condition + bioreplicate + run,
fun.aggregate=sum,
column.values="intensity")
## I would love to know why it is that the spearman and pearson correlations
## in this data are so oddly different compare to previous data sets. Is this
## an artifact of the fact that this time I am _only_ looking at CF samples?
## Are the high intensity numbers messing with non-rank-based correlations?
I just realized something which should be added to my SWATH2stats fork: a simplified filter function which invokes all of these, so that I can make sure there are no typographical errors introduced by my invoking each of these things one at a time.
## Number of non-decoy peptides: 9265
## Number of decoy peptides: 448
## Decoy rate: 0.0484
## This seems a bit high to me, yesno?
fdr_overall <- assess_fdr_overall(s2s_exp, output="Rconsole", plot=TRUE)
## The average FDR by run on assay level is 0.008
## The average FDR by run on peptide level is 0.009
## The average FDR by run on protein level is 0.029
## Target assay FDR: 0.02
## Required overall m-score cutoff: 0.0063096
## achieving assay FDR: 0.0195
## Target protein FDR: 0.02
## Required overall m-score cutoff: 0.0014125
## achieving protein FDR: 0.0193
## Original dimension: 81212, new dimension: 76342, difference: 4870.
## Peptides need to have been quantified in more conditions than: 14.4 in order to pass this percentage-based threshold.
## Fraction of peptides selected: 0.26
## Original dimension: 81212, new dimension: 46367, difference: 34845.
## Apply the m-score-based FDR filter.  'filtered_ms' and 'prot_score' come
## from an earlier chunk not shown here; FFT is SWATH2stats' estimated
## fraction of false targets used to scale the decoy counts (per the
## SWATH2stats documentation -- confirm the 0.7 estimate for this data).
filtered_ms_fdr <- filter_mscore_fdr(filtered_ms, FFT=0.7, rm.decoy=TRUE,
overall_protein_fdr_target=prot_score,
upper_overall_peptide_fdr_limit=0.05)
## Target protein FDR: 0.00141253754462275
## Required overall m-score cutoff: 0.01
## achieving protein FDR: 0
## filter_mscore_fdr is filtering the data...
## finding m-score cutoff to achieve desired protein FDR in protein master list..
## finding m-score cutoff to achieve desired global peptide FDR..
## Target peptide FDR: 0.05
## Required overall m-score cutoff: 0.01
## Achieving peptide FDR: 0
## Proteins selected:
## Total proteins selected: 2020
## Final target proteins: 2020
## Final decoy proteins: 0
## Peptides mapping to these protein entries selected:
## Total mapping peptides: 8679
## Final target peptides: 8679
## Final decoy peptides: 0
## Total peptides selected from:
## Total peptides: 8679
## Final target peptides: 8679
## Final decoy peptides: 0
## Individual run FDR quality of the peptides was not calculated
## as not every run contains a decoy.
## The decoys have been removed from the returned data.
## Number of proteins detected: 2049
## Protein identifiers: Rv1270c, Rv3669, Rv0161, Rv1306, Rv2540c, Rv3818
## Number of proteins detected that are supported by a proteotypic peptide: 1952
## Number of proteotypic peptides detected: 8600
## Number of proteins detected: 1954
## First 6 protein identifiers: Rv1270c, Rv3669, Rv0161, Rv1306, Rv2540c, Rv3818
## Cap each protein at its 10 best peptides.
filtered_ms_fdr_pr_all_str <- filter_on_max_peptides(data=filtered_ms_fdr_pr_all,
n_peptides=10, rm.decoy=TRUE)
## Before filtering:
## Number of proteins: 1952
## Number of peptides: 8600
##
## Percentage of peptides removed: 11.01%
##
## After filtering:
## Number of proteins: 1933
## Number of peptides: 7653
## Require at least 3 peptides per protein.
old_filtered_all_filters <- filter_on_min_peptides(data=filtered_ms_fdr_pr_all_str,
n_peptides=3, rm.decoy=TRUE)
## Before filtering:
## Number of proteins: 1933
## Number of peptides: 7653
##
## Percentage of peptides removed: 0.17%
##
## After filtering:
## Number of proteins: 1530
## Number of peptides: 7640
## Run the whole filter cascade (decoy assessment, m-score, frequency-of-
## observation, FDR, proteotypic, and maximum-peptide filters -- see the
## transcript below) via the fork's convenience wrapper; do_min=FALSE skips
## the minimum-peptide filter.
filtered_all_filters <- s2s_all_filters(s2s_exp, target_fdr=0.1, mscore=0.1, upper_fdr=0.1,
do_min=FALSE)
## Number of non-decoy peptides: 9265
## Number of decoy peptides: 448
## Decoy rate: 0.0484
## There were 81212 observations and 942 decoy observations.
## The average FDR by run on assay level is 0.008
## The average FDR by run on peptide level is 0.009
## The average FDR by run on protein level is 0.029
## Target assay FDR: 0.1
## Required overall m-score cutoff: 0.01
## achieving assay FDR: 0.0306
## Target protein FDR: 0.1
## Required overall m-score cutoff: 0.0070795
## achieving protein FDR: 0.0937
## Starting mscore filter.
## Starting mscore filter.
## Original dimension: 81212, new dimension: 81197, difference: 15.
## Starting freqobs filter.
## Peptides need to have been quantified in more conditions than: 13.5 in order to pass this percentage-based threshold.
## Fraction of peptides selected: 0.29
## Original dimension: 81197, new dimension: 50395, difference: 30802.
## Starting fdr filter.
## Target protein FDR: 0.00707945784384138
## Required overall m-score cutoff: 0.01
## achieving protein FDR: 0
## filter_mscore_fdr is filtering the data...
## finding m-score cutoff to achieve desired protein FDR in protein master list..
## finding m-score cutoff to achieve desired global peptide FDR..
## Target peptide FDR: 0.1
## Required overall m-score cutoff: 0.01
## Achieving peptide FDR: 0
## Proteins selected:
## Total proteins selected: 862
## Final target proteins: 862
## Final decoy proteins: 0
## Peptides mapping to these protein entries selected:
## Total mapping peptides: 2747
## Final target peptides: 2747
## Final decoy peptides: 0
## Total peptides selected from:
## Total peptides: 2747
## Final target peptides: 2747
## Final decoy peptides: 0
## Individual run FDR quality of the peptides was not calculated
## as not every run contains a decoy.
## The decoys have been removed from the returned data.
## Starting proteotypic filter.
## Number of proteins detected: 879
## Protein identifiers: Rv3879c, Rv0577, Rv3800c, Rv0360c, Rv1023, Rv1808
## Number of proteins detected that are supported by a proteotypic peptide: 840
## Number of proteotypic peptides detected: 2719
## Starting peptide filter.
## Number of proteins detected: 840
## First 6 protein identifiers: Rv3879c, Rv0577, Rv3800c, Rv0360c, Rv1023, Rv1808
## Starting maximum peptide filter.
## Before filtering:
## Number of proteins: 840
## Number of peptides: 2719
##
## Percentage of peptides removed: 1.95%
##
## After filtering:
## Number of proteins: 839
## Number of peptides: 2666
## Skipping min peptide filter.
## We went from 2557/9713 proteins/peptides to:
## 839/2666 proteins/peptides.
swath2stats provides a couple of ways to print out its results, one in a format specifically intended for MSstats, and another as a more canonical matrix of rows = proteins, columns = samples.
Let us reset the version back to 20190327 here.
## I think these matrixes are probably smarter to use than the raw outmatrix from tric.
## But I am not a fan of rewriting the sample column names.
## Directory which will hold the swath2stats output matrices for this version.
matrix_prefix <- file.path("preprocessing", "10swath2stats", ver)
## dir.exists() states the intent better than file.exists() (which is also
## TRUE for a plain file of the same name); recursive=TRUE ensures the
## parent directories are created as well.
if (!dir.exists(matrix_prefix)) {
  dir.create(matrix_prefix, recursive=TRUE)
}
## I want to write a few iterations of the filtered data
## Starting with the raw and moving down, perhaps I should just
## add this logic to my fancy new filtering function?
## Completely unfiltered protein matrix (rows = proteins, columns = samples).
protein_matrix_unfilt <- write_matrix_proteins(
filtered_all_filters[["raw"]], write.csv=TRUE,
filename=file.path(matrix_prefix, "protein_matrix_unfiltered.csv"))
## Protein overview matrix preprocessing/10swath2stats/20190718/protein_matrix_unfiltered.csv written to working folder.
## [1] 2557 19
## After only the m-score filter.
protein_matrix_mscore <- write_matrix_proteins(
filtered_all_filters[["mscore_filtered"]], write.csv=TRUE,
filename=file.path(matrix_prefix, "protein_matrix_mscore.csv"))
## Protein overview matrix preprocessing/10swath2stats/20190718/protein_matrix_mscore.csv written to working folder.
## [1] 2156 19
peptide_matrix_mscore <- write_matrix_peptides(
filtered_all_filters[["mscore_filtered"]], write.csv=TRUE,
filename=file.path(matrix_prefix, "peptide_matrix_mscore.csv"))
## Peptide overview matrix preprocessing/10swath2stats/20190718/peptide_matrix_mscore.csv written to working folder.
## [1] 9265 19
## After the full filter cascade.
protein_matrix_filtered <- write_matrix_proteins(
filtered_all_filters[["final"]], write.csv=TRUE,
filename=file.path(matrix_prefix, "protein_matrix_filtered.csv"))
## Protein overview matrix preprocessing/10swath2stats/20190718/protein_matrix_filtered.csv written to working folder.
## [1] 839 19
peptide_matrix_filtered <- write_matrix_peptides(
filtered_all_filters[["final"]], write.csv=TRUE,
filename=file.path(matrix_prefix, "peptide_matrix_filtered.csv"))
## Peptide overview matrix preprocessing/10swath2stats/20190718/peptide_matrix_filtered.csv written to working folder.
## [1] 5167 19
## Do the correlation of the sum of peptides/protein
rt_sum_cor <- plot_correlation_between_samples(
filtered_all_filters[["final"]], column.values="intensity",
fun.aggregate=sum, size=2,
comparison=transition_group_id ~
condition + bioreplicate + run)
## And their means
rt_mean_cor <- plot_correlation_between_samples(
filtered_all_filters[["final"]], column.values="intensity",
fun.aggregate=mean, size=2,
comparison=transition_group_id ~
condition + bioreplicate + run)
## NOTE(review): 'cols' is assigned but not used in this chunk.
cols <- colnames(filtered_all_filters[["final"]])
## Expand the filtered data to one row per transition (see the transcript
## below) in preparation for MSstats; all.columns=TRUE keeps the extra,
## non-required columns too.
disaggregated <- disaggregate(filtered_all_filters[["final"]], all.columns=TRUE)
## The library contains 1 transitions per precursor.
## The data table was transformed into a table containing one row per transition.
## One or several columns required by MSstats were not in the data. The columns were created and filled with NAs.
## Missing columns: productcharge, isotopelabeltype
## isotopelabeltype was filled with light.
I want to revisit aLFQ; I think it might provide better protein-level quantification methods. aLFQ looks promising, but I have not figured out valid parameters for using it.
## Quick sanity look at the MSstats input before repurposing the tric data
## for aLFQ.
summary(msstats_input)
devtools::load_all("~/scratch/git/aLFQ")
## Map the tric columns onto the names aLFQ expects.  FullPeptideName is
## intentionally selected twice: once to serve as peptide_id and once as
## peptide_sequence.
alfq_input <- tric_data[, c("align_origfilename", "ProteinName", "FullPeptideName", "transition_group_id",
"FullPeptideName", "Charge", "Intensity")]
colnames(alfq_input) <- c("run_id", "protein_id", "peptide_id", "transition_id", "peptide_sequence",
"precursor_charge", "transition_intensity")
## NOTE(review): '?' as a placeholder concentration -- verify this is what
## aLFQ expects for samples lacking anchor concentrations.
alfq_input[["concentration"]] <- "?"
alfq_inference <- aLFQ::ProteinInference.default(alfq_input, consensus_proteins=FALSE,
consensus_peptides=FALSE, transition_strictness="loose",
consensus_transitions=FALSE)
alfq_quantities <- aLFQ::AbsoluteQuantification.default(alfq_inference)
summary(alfq_quantities$estimation)
## Hmm that does not look right.
msstats.org seems to provide a complete solution for performing reasonable metrics of this data.
I am currently reading: http://msstats.org/wp-content/uploads/2017/01/MSstats_v3.7.3_manual.pdf
I made some moderately intrusive changes to MSstats to make it clearer, as well.
## Load the locally modified MSstats; sm() quiets the load messages.
tt <- sm(devtools::load_all("~/scratch/git/MSstats"))
## NOTE(review): this checkpoint is keyed on msstats_ver (20190327, per the
## 'reset the version' note above) while the groupComparison checkpoint later
## uses 'ver' -- confirm both are intended.
msstats_ver <- "20190327"
checkpoint <- paste0("msstats_dataprocess-v", msstats_ver, ".rda")
## Cache the dataProcess() result in an rda checkpoint, presumably because it
## is expensive to recompute.
if (file.exists(checkpoint)) {
load(file=checkpoint)
} else {
msstats_quant <- dataProcess(msstats_input)
save(file=checkpoint, list=c("msstats_quant"))
}
##checkpoint <- paste0("msstats_plots-v", msstats_ver, ".rda")
##if (file.exists(checkpoint)) {
## load(file=checkpoint)
##} else {
## msstats_plots <- dataProcessPlots(msstats_quant, type="QCPLOT")
## save(file=checkpoint, list=c("msstats_plots"))
##}
my_levels <- levels(as.factor(msstats_input$condition))
my_levels
## Each numerator/denominator pair below becomes one row of the contrast
## matrix used for the group comparisons.
comparisons <- make_simplified_contrast_matrix(
numerators=c("wt_filtrate", "delta_filtrate", "delta_filtrate", "delta_whole"),
denominators=c("wt_whole", "delta_whole", "wt_filtrate", "wt_whole"))
## Run each contrast through MSstats::groupComparison() one at a time,
## collecting the results in a named list and caching the whole list in an
## rda checkpoint.
msstats_results <- list()
checkpoint <- paste0("msstats_group-v", ver, ".rda")
if (file.exists(checkpoint)) {
  load(file=checkpoint)
} else {
  ## seq_len(nrow()) is safe when the contrast matrix has zero rows, unlike
  ## 1:length(rownames()); also avoid 'c' as a loop variable since it
  ## shadows base::c().
  for (i in seq_len(nrow(comparisons))) {
    name <- rownames(comparisons)[i]
    message("Starting ", name)
    ## drop=FALSE keeps the single contrast as a 1-row matrix with its
    ## row/column names intact, replacing the old t(as.matrix()) dance.
    comp <- comparisons[i, , drop=FALSE]
    rownames(comp) <- name
    msstats_results[[name]] <- sm(MSstats::groupComparison(contrast.matrix=comp,
                                                           data=msstats_quant))
    message("Finished ", name)
  }
  save(file=checkpoint, list=c("msstats_results"))
}
Yan asked for the p/pe protein qc plots. ok. I changed the dataProcessPlots to return something useful, so that should be possible now.
## Gene IDs of interest, taken from the first column of the reference file.
pe_genes <- read.table("reference/annotated_pe_genes.txt")[[1]]
## Unfortunately, the names did not get set in my changed version of dataProcessPlots...
plotlst <- msstats_plots$QCPLOT
## Reconstruct the plot names from the protein factor levels, dropping the
## '1/' prefix so they match the gene IDs.
available_plots <- gsub(pattern="^1/", replacement="",
x=levels(msstats_quant$ProcessedData$PROTEIN))
names(plotlst) <- available_plots
## Keep only the pe genes which actually have a QC plot.
pe_in_avail_idx <- pe_genes %in% available_plots
pe_in_avail <- pe_genes[pe_in_avail_idx]
pe_plots <- plotlst[pe_in_avail]
## Render every pe QC plot into a single multi-page pdf.
pdf(file="pe_qc_plots.pdf")
## seq_along() is safe when pe_plots is empty; 1:length() would have
## iterated over c(1, 0) and errored on the empty list.
for (idx in seq_along(pe_plots)) {
  plot(pe_plots[[idx]])
}
dev.off()
length(pe_plots)
Since I am not certain I understand these data, I will take the intensities from SWATH2stats, metadata, and annotation data; attempt to create a ‘normal’ expressionset; poke at it to see what I can learn.
I want to use the same metadata as were used for MSstats. There is really only one important difference from the requirements of hpgltools: I do not allow rownames/sampleIDs to start with a number.
I do not want the \1 before the protein names, I already merged them into one entry per gene via SWATH2stats.
There are two ways to get the matrix of intensities, either directly from pyprophet, or from the filtered data provided by swath2stats. Here is the former, but I should be using the latter.
## NOTE(review): this chunk re-reads the matrices from csv; the chunk below
## (working from the in-memory protein_matrix_* objects) appears to
## supersede it.
prot_mtrx <- read.csv(file.path("preprocessing", "10swath2stats", ver, "protein_matrix_filtered.csv"))
## Strip the '1/' prefix from the protein names while promoting them to
## rownames, then drop the name column (assumed to be column 1).
rownames(prot_mtrx) <- gsub(pattern="^1\\/", replacement="", x=prot_mtrx[["proteinname"]])
prot_mtrx <- prot_mtrx[, -1]
prot_keepers <- grepl(pattern="^Rv", x=rownames(prot_mtrx))
prot_mtrx <- prot_mtrx[prot_keepers, ]
## Important question: Did SWATH2stats reorder my data?
## NOTE(review): the pattern below looks for '2018' but this run's samples
## are named 2019_0709Briken*; if nothing matches, the gsub is a no-op --
## confirm whether '2019' was intended.
colnames(prot_mtrx) <- gsub(pattern="^(.*)(2018.*)$", replacement="s\\2", x=colnames(prot_mtrx))
unfilt_mtrx <- read.csv(file.path("preprocessing", "10swath2stats", ver, "protein_matrix_unfiltered.csv"))
rownames(unfilt_mtrx) <- gsub(pattern="^1\\/", replacement="", x=unfilt_mtrx[["proteinname"]])
unfilt_mtrx <- unfilt_mtrx[, -1]
unfilt_keepers <- grepl(pattern="^Rv", x=rownames(unfilt_mtrx))
unfilt_mtrx <- unfilt_mtrx[unfilt_keepers, ]
## Important question: Did SWATH2stats reorder my data?
## NOTE(review): same stale-looking '2018' pattern as above.
colnames(unfilt_mtrx) <- gsub(pattern="^(.*)(2018.*)$", replacement="s\\2", x=colnames(unfilt_mtrx))
## Take the filtered/unfiltered protein matrices already in memory, trim the
## sample names down to their date-based portion, move the protein names to
## the rownames, and keep only the Rv genes.
prot_mtrx <- protein_matrix_filtered
colnames(prot_mtrx) <- gsub(x=colnames(prot_mtrx), pattern="^(.*)_(2019.*)$", replacement="\\2")
## Strip the "1/" prefix so the rownames match the annotation data; the
## original only did this for the unfiltered matrix below, which was
## inconsistent (a no-op if the prefix is already gone, a fix otherwise).
rownames(prot_mtrx) <- gsub(pattern="^1\\/", replacement="", x=prot_mtrx[["proteinname"]])
prot_mtrx[["proteinname"]] <- NULL
rv_idx <- grepl(x=rownames(prot_mtrx), pattern="^Rv")
prot_mtrx <- prot_mtrx[rv_idx, ]
unfilt_mtrx <- protein_matrix_unfilt
colnames(unfilt_mtrx) <- gsub(x=colnames(unfilt_mtrx), pattern="^(.*)_(2019.*)$", replacement="\\2")
rownames(unfilt_mtrx) <- gsub(pattern="^1\\/", replacement="", x=unfilt_mtrx[["proteinname"]])
unfilt_mtrx[["proteinname"]] <- NULL
rv_idx <- grepl(x=rownames(unfilt_mtrx), pattern="^Rv")
unfilt_mtrx <- unfilt_mtrx[rv_idx, ]
Now we should have sufficient pieces to make an expressionset.
While here, I will also split the data into a cf and whole-cell pair of data structures.
## Drop the metadata not in the protein matrix:
## And ensure that they are the same order.
## NOTE(review): rename the columns _before_ indexing the metadata. The
## sample IDs in sample_annot presumably carry the leading "s" (rownames
## are not allowed to start with a number), so indexing by the un-prefixed
## column names would yield NA rows -- confirm against sample_annot.
## The original also computed `metadata` and then never used it; pass it to
## create_expt() as intended.
colnames(prot_mtrx) <- gsub(x=colnames(prot_mtrx), pattern="^.*(2019.*$)", replacement="s\\1")
reordered <- colnames(prot_mtrx)
metadata <- sample_annot[reordered, ]
## Drop the first annotation column (presumably redundant with the rownames
## -- TODO confirm).
mtb_annotations <- mtb_annotations[, -1]
protein_expt <- create_expt(metadata,
                            count_dataframe=prot_mtrx,
                            gene_info=mtb_annotations)
## Reading the sample metadata.
## The sample definitions comprises: 18 rows(samples) and 16 columns(metadata fields).
## Matched 835 annotations and counts.
## Bringing together the count matrix and gene information.
## Some annotations were lost in merging, setting them to 'undefined'.
## The final expressionset has 838 rows and 18 columns.
## Same flow for the unfiltered matrix: rename the sample columns first,
## then align the metadata rows to the matrix column order and use them.
colnames(unfilt_mtrx) <- gsub(x=colnames(unfilt_mtrx), pattern="^.*(2019.*$)", replacement="s\\1")
reordered <- colnames(unfilt_mtrx)
metadata <- sample_annot[reordered, ]
## NOTE(review): do NOT apply mtb_annotations[, -1] a second time here; its
## first column was already dropped when the filtered expressionset was
## built, so repeating the drop would discard a real annotation column.
unfilt_expt <- create_expt(metadata,
                           count_dataframe=unfilt_mtrx,
                           gene_info=mtb_annotations)
## Reading the sample metadata.
## The sample definitions comprises: 18 rows(samples) and 16 columns(metadata fields).
## Matched 2061 annotations and counts.
## Bringing together the count matrix and gene information.
## Some annotations were lost in merging, setting them to 'undefined'.
## The final expressionset has 2078 rows and 18 columns.
## There were 18, now there are 17 samples.
## There were 18, now there are 17 samples.
## Write the protein expressionset to xlsx, plotting against the raw data.
## NOTE(review): protein_sub is not defined in the visible code -- the
## subset_expt() call below is commented out, so protein_sub presumably
## survives from an earlier chunk or session (the "18, now 17 samples" log
## above suggests a subset did happen); confirm before re-running cleanly.
##protein_sub <- subset_expt(protein_expt, subset="batch=='early'")
written_file <- glue::glue("excel/{rundate}_protein_expt-v{ver}.xlsx")
protein_write <- write_expt(protein_sub, violin=TRUE,
batch="raw", excel=written_file)
## Writing the first sheet, containing a legend and some summary data.
## Writing the raw reads.
## Graphing the raw reads.
## Attempting mixed linear model with: ~ (1|condition) + (1|batch)
## Fitting the expressionset to the model, this is slow.
## Warning in serialize(data, node$con): 'package:variancePartition' may not
## be available when loading
## Warning in serialize(data, node$con): 'package:variancePartition' may not
## be available when loading
## Warning in serialize(data, node$con): 'package:variancePartition' may not
## be available when loading
## Warning in serialize(data, node$con): 'package:variancePartition' may not
## be available when loading
## Warning in serialize(data, node$con): 'package:variancePartition' may not
## be available when loading
## Warning in serialize(data, node$con): 'package:variancePartition' may not
## be available when loading
##
## Finished...
## Total: 6 s
## Placing factor: condition at the beginning of the model.
## Writing the normalized reads.
## Graphing the normalized reads.
## Attempting mixed linear model with: ~ (1|condition) + (1|batch)
## Fitting the expressionset to the model, this is slow.
##
## Finished...
## Total: 7 s
## Placing factor: condition at the beginning of the model.
## Writing the median reads by factor.
## The factor wt_cf has 6 rows.
## The factor dt_cf has 5 rows.
## The factor cp_cf has 6 rows.
## Note: zip::zip() is deprecated, please use zip::zipr() instead
## Write the unfiltered expressionset to xlsx, plotting against the raw data.
## NOTE(review): unfilt_sub is not defined in the visible code; presumably a
## subset_expt() on unfilt_expt happened in an earlier chunk -- confirm.
written_file <- glue::glue("excel/{rundate}_unfilt_protein_expt-v{ver}.xlsx")
unfilt_write <- write_expt(unfilt_sub, violin=TRUE,
batch="raw", excel=written_file)
## Writing the first sheet, containing a legend and some summary data.
## Writing the raw reads.
## Graphing the raw reads.
## Attempting mixed linear model with: ~ (1|condition) + (1|batch)
## Fitting the expressionset to the model, this is slow.
##
## Finished...
## Total: 15 s
## Placing factor: condition at the beginning of the model.
## Writing the normalized reads.
## Graphing the normalized reads.
## Attempting mixed linear model with: ~ (1|condition) + (1|batch)
## Fitting the expressionset to the model, this is slow.
##
## Finished...
## Total: 13 s
## Placing factor: condition at the beginning of the model.
## Writing the median reads by factor.
## The factor wt_cf has 6 rows.
## The factor dt_cf has 5 rows.
## The factor cp_cf has 6 rows.
## Low-count filter both data structures, then run the pairwise DE methods.
protein_filt <- sm(normalize_expt(protein_sub, filter=TRUE))
protein_unfilt <- sm(normalize_expt(unfilt_sub, filter=TRUE))
protein_de <- sm(all_pairwise(protein_filt, model_batch=FALSE, force=TRUE, parallel=FALSE))
## NOTE(review): unfilt_de is consumed by combine_de_tables() further down
## but was never created anywhere visible; run the same pairwise analysis
## on the unfiltered data so that call has something to work with.
unfilt_de <- sm(all_pairwise(protein_unfilt, model_batch=FALSE, force=TRUE, parallel=FALSE))
## Contrasts of interest: deletion vs. wt and complement vs. wt (cf samples).
keepers <- list(
  "dt_wt" = c("dt_cf", "wt_cf"),
  "cp_wt" = c("cp_cf", "wt_cf"))
protein_tables <- sm(combine_de_tables(
  protein_de, keepers=keepers,
  excel=glue::glue("excel/de_{rundate}_tables_v{ver}.xlsx")))
protein_sig <- extract_significant_genes(
  protein_tables,
  excel=glue::glue("excel/sig_{rundate}_tables_v{ver}.xlsx"))
## Writing a legend of columns.
## Writing excel data according to limma for dt_wt: 1/10.
## After (adj)p filter, the up genes table has 0 genes.
## After (adj)p filter, the down genes table has 0 genes.
## After fold change filter, the up genes table has 0 genes.
## After fold change filter, the down genes table has 0 genes.
## Writing excel data according to limma for cp_wt: 2/10.
## After (adj)p filter, the up genes table has 0 genes.
## After (adj)p filter, the down genes table has 0 genes.
## After fold change filter, the up genes table has 0 genes.
## After fold change filter, the down genes table has 0 genes.
## Printing significant genes to the file: excel/sig_20190822_tables_v20190718.xlsx
## The up table dt_wt is empty.
## The down table dt_wt is empty.
## The up table cp_wt is empty.
## The down table cp_wt is empty.
## Writing excel data according to edger for dt_wt: 1/10.
## After (adj)p filter, the up genes table has 0 genes.
## After (adj)p filter, the down genes table has 0 genes.
## After fold change filter, the up genes table has 0 genes.
## After fold change filter, the down genes table has 0 genes.
## Writing excel data according to edger for cp_wt: 2/10.
## After (adj)p filter, the up genes table has 0 genes.
## After (adj)p filter, the down genes table has 2 genes.
## After fold change filter, the up genes table has 0 genes.
## After fold change filter, the down genes table has 2 genes.
## Printing significant genes to the file: excel/sig_20190822_tables_v20190718.xlsx
## The up table dt_wt is empty.
## The down table dt_wt is empty.
## The up table cp_wt is empty.
## Writing excel data according to deseq for dt_wt: 1/10.
## After (adj)p filter, the up genes table has 0 genes.
## After (adj)p filter, the down genes table has 0 genes.
## After fold change filter, the up genes table has 0 genes.
## After fold change filter, the down genes table has 0 genes.
## Writing excel data according to deseq for cp_wt: 2/10.
## After (adj)p filter, the up genes table has 0 genes.
## After (adj)p filter, the down genes table has 1 genes.
## After fold change filter, the up genes table has 0 genes.
## After fold change filter, the down genes table has 1 genes.
## Printing significant genes to the file: excel/sig_20190822_tables_v20190718.xlsx
## The up table dt_wt is empty.
## The down table dt_wt is empty.
## The up table cp_wt is empty.
## Writing excel data according to ebseq for dt_wt: 1/10.
## After (adj)p filter, the up genes table has 0 genes.
## After (adj)p filter, the down genes table has 0 genes.
## After fold change filter, the up genes table has 0 genes.
## After fold change filter, the down genes table has 0 genes.
## Writing excel data according to ebseq for cp_wt: 2/10.
## After (adj)p filter, the up genes table has 0 genes.
## After (adj)p filter, the down genes table has 1 genes.
## After fold change filter, the up genes table has 0 genes.
## After fold change filter, the down genes table has 1 genes.
## Printing significant genes to the file: excel/sig_20190822_tables_v20190718.xlsx
## The up table dt_wt is empty.
## The down table dt_wt is empty.
## The up table cp_wt is empty.
## Writing excel data according to basic for dt_wt: 1/10.
## After (adj)p filter, the up genes table has 0 genes.
## After (adj)p filter, the down genes table has 0 genes.
## After fold change filter, the up genes table has 0 genes.
## After fold change filter, the down genes table has 0 genes.
## Writing excel data according to basic for cp_wt: 2/10.
## After (adj)p filter, the up genes table has 0 genes.
## After (adj)p filter, the down genes table has 0 genes.
## After fold change filter, the up genes table has 0 genes.
## After fold change filter, the down genes table has 0 genes.
## Printing significant genes to the file: excel/sig_20190822_tables_v20190718.xlsx
## The up table dt_wt is empty.
## The down table dt_wt is empty.
## The up table cp_wt is empty.
## The down table cp_wt is empty.
## Adding significance bar plots.
## Combine the unfiltered DE results into one table set and extract the
## significant genes, mirroring the filtered analysis above.
unfilt_tables <- sm(
  combine_de_tables(unfilt_de,
                    keepers=keepers,
                    excel=glue::glue("excel/de_{rundate}_unfilt_tables_v{ver}.xlsx")))
unfilt_sig <- extract_significant_genes(
  unfilt_tables,
  excel=glue::glue("excel/sig_{rundate}_unfilt_tables_v{ver}.xlsx"))
## Writing a legend of columns.
## Writing excel data according to limma for dt_wt: 1/10.
## After (adj)p filter, the up genes table has 2 genes.
## After (adj)p filter, the down genes table has 2 genes.
## After fold change filter, the up genes table has 0 genes.
## After fold change filter, the down genes table has 1 genes.
## Writing excel data according to limma for cp_wt: 2/10.
## After (adj)p filter, the up genes table has 1 genes.
## After (adj)p filter, the down genes table has 2 genes.
## After fold change filter, the up genes table has 0 genes.
## After fold change filter, the down genes table has 1 genes.
## Printing significant genes to the file: excel/sig_20190822_unfilt_tables_v20190718.xlsx
## The up table dt_wt is empty.
## The up table cp_wt is empty.
## Writing excel data according to edger for dt_wt: 1/10.
## After (adj)p filter, the up genes table has 6 genes.
## After (adj)p filter, the down genes table has 2 genes.
## After fold change filter, the up genes table has 6 genes.
## After fold change filter, the down genes table has 2 genes.
## Writing excel data according to edger for cp_wt: 2/10.
## After (adj)p filter, the up genes table has 4 genes.
## After (adj)p filter, the down genes table has 1 genes.
## After fold change filter, the up genes table has 4 genes.
## After fold change filter, the down genes table has 1 genes.
## Printing significant genes to the file: excel/sig_20190822_unfilt_tables_v20190718.xlsx
## 1/2: Creating significant table up_edger_dt_wt
## 2/2: Creating significant table up_edger_cp_wt
## Writing excel data according to deseq for dt_wt: 1/10.
## After (adj)p filter, the up genes table has 101 genes.
## After (adj)p filter, the down genes table has 24 genes.
## After fold change filter, the up genes table has 101 genes.
## After fold change filter, the down genes table has 24 genes.
## Writing excel data according to deseq for cp_wt: 2/10.
## After (adj)p filter, the up genes table has 91 genes.
## After (adj)p filter, the down genes table has 24 genes.
## After fold change filter, the up genes table has 91 genes.
## After fold change filter, the down genes table has 23 genes.
## Printing significant genes to the file: excel/sig_20190822_unfilt_tables_v20190718.xlsx
## 1/2: Creating significant table up_deseq_dt_wt
## 2/2: Creating significant table up_deseq_cp_wt
## Writing excel data according to ebseq for dt_wt: 1/10.
## After (adj)p filter, the up genes table has 21 genes.
## After (adj)p filter, the down genes table has 20 genes.
## After fold change filter, the up genes table has 21 genes.
## After fold change filter, the down genes table has 20 genes.
## Writing excel data according to ebseq for cp_wt: 2/10.
## After (adj)p filter, the up genes table has 11 genes.
## After (adj)p filter, the down genes table has 17 genes.
## After fold change filter, the up genes table has 11 genes.
## After fold change filter, the down genes table has 16 genes.
## Printing significant genes to the file: excel/sig_20190822_unfilt_tables_v20190718.xlsx
## 1/2: Creating significant table up_ebseq_dt_wt
## 2/2: Creating significant table up_ebseq_cp_wt
## Writing excel data according to basic for dt_wt: 1/10.
## After (adj)p filter, the up genes table has 0 genes.
## After (adj)p filter, the down genes table has 0 genes.
## After fold change filter, the up genes table has 0 genes.
## After fold change filter, the down genes table has 0 genes.
## Writing excel data according to basic for cp_wt: 2/10.
## After (adj)p filter, the up genes table has 0 genes.
## After (adj)p filter, the down genes table has 0 genes.
## After fold change filter, the up genes table has 0 genes.
## After fold change filter, the down genes table has 0 genes.
## Printing significant genes to the file: excel/sig_20190822_unfilt_tables_v20190718.xlsx
## The up table dt_wt is empty.
## The down table dt_wt is empty.
## The up table cp_wt is empty.
## The down table cp_wt is empty.
## Adding significance bar plots.
## Put NAs into the data for the set of proteins for which there are
## 0s in _not_all_ of the samples for each condition.
## (add_conditional_nas is presumably an hpgltools helper -- confirm; the
## log below reports the all-zero row counts per condition.)
protein_nas <- add_conditional_nas(unfilt_sub)
## In condition wt_cf there are 501 rows which are all zero.
## In condition dt_cf there are 380 rows which are all zero.
## In condition cp_cf there are 415 rows which are all zero.
## Pairwise DE on the NA-masked data; ebseq and the pca test are skipped.
unfilt_nas_de <- all_pairwise(protein_nas,
                              model_batch=FALSE,
                              parallel=FALSE,
                              do_ebseq=FALSE,
                              test_pca=FALSE,
                              force=TRUE)
## Starting basic_pairwise().
## Starting basic pairwise comparison.
## Leaving the data alone, regardless of normalization state.
## Basic step 0/3: Transforming data.
## Basic step 1/3: Creating mean and variance tables.
## Basic step 2/3: Performing 6 comparisons.
## Basic step 3/3: Creating faux DE Tables.
## Basic: Returning tables.
## Starting deseq_pairwise().
## Starting DESeq2 pairwise comparisons.
## About to round the data, this is a pretty terrible thing to do. But if you, like me, want to see what happens when you put non-standard data into deseq, then here you go.
## Warning in choose_binom_dataset(input, force = force): This data was
## inappropriately forced into integers.
## Choosing the non-intercept containing model.
## DESeq2 step 1/5: Including only condition in the deseq model.
## Warning in import_deseq(data, column_data, model_string, tximport =
## input[["tximport"]][["raw"]]): Converted down 311 elements because they are
## larger than the maximum integer size.
## converting counts to integer mode
## DESeq2 step 2/5: Estimate size factors.
## DESeq2 step 3/5: Estimate dispersions.
## gene-wise dispersion estimates
## mean-dispersion relationship
## final dispersion estimates
## Using a parametric fitting seems to have worked.
## DESeq2 step 4/5: nbinomWaldTest.
## Starting edger_pairwise().
## Starting edgeR pairwise comparisons.
## About to round the data, this is a pretty terrible thing to do. But if you, like me, want to see what happens when you put non-standard data into deseq, then here you go.
## Warning in choose_binom_dataset(input, force = force): This data was
## inappropriately forced into integers.
## Choosing the non-intercept containing model.
## EdgeR step 1/9: Importing and normalizing data.
## EdgeR step 2/9: Estimating the common dispersion.
## EdgeR step 3/9: Estimating dispersion across genes.
## EdgeR step 4/9: Estimating GLM Common dispersion.
## EdgeR step 5/9: Estimating GLM Trended dispersion.
## EdgeR step 6/9: Estimating GLM Tagged dispersion.
## EdgeR step 7/9: Running glmFit, switch to glmQLFit by changing the argument 'edger_test'.
## EdgeR step 8/9: Making pairwise contrasts.
## Starting limma_pairwise().
## Starting limma pairwise comparison.
## Leaving the data alone, regardless of normalization state.
## libsize was not specified, this parameter has profound effects on limma's result.
## Using the libsize from expt$libsize.
## Limma step 1/6: choosing model.
## Choosing the non-intercept containing model.
## Limma step 2/6: running hpgl_voom(), switch with the argument 'which_voom'.
## The voom input was not cpm, converting now.
## The voom input was not log2, transforming now.
## Warning in regularize.values(x, y, ties, missing(ties)): collapsing to
## unique 'x' values
## Limma step 3/6: running lmFit with method: ls.
## Limma step 4/6: making and fitting contrasts with no intercept. (~ 0 + factors)
## Limma step 5/6: Running eBayes with robust=FALSE and trend=FALSE.
## Limma step 6/6: Writing limma outputs.
## Limma step 6/6: 1/3: Creating table: dt_cf_vs_cp_cf. Adjust=BH
## Limma step 6/6: 2/3: Creating table: wt_cf_vs_cp_cf. Adjust=BH
## Limma step 6/6: 3/3: Creating table: wt_cf_vs_dt_cf. Adjust=BH
## Limma step 6/6: 1/3: Creating table: cp_cf. Adjust=BH
## Limma step 6/6: 2/3: Creating table: dt_cf. Adjust=BH
## Limma step 6/6: 3/3: Creating table: wt_cf. Adjust=BH
## Comparing analyses.
## Combine the NA-masked DE results, writing them to a fixed xlsx path.
unfilt_nas_tables <- combine_de_tables(
  unfilt_nas_de,
  keepers=keepers,
  excel="excel/unfilt_nas_table.xlsx")
## Deleting the file excel/unfilt_nas_table.xlsx before writing the tables.
## Writing a legend of columns.
## Printing a pca plot before/after surrogates/batch estimation.
## Working on 1/2: dt_wt which is: dt_cf/wt_cf.
## Found inverse table with wt_cf_vs_dt_cf
## The ebseq table is null.
## Used Bon Ferroni corrected t test(s) between columns.
## Used Bon Ferroni corrected t test(s) between columns.
## Used Bon Ferroni corrected t test(s) between columns.
## Working on 2/2: cp_wt which is: cp_cf/wt_cf.
## Found inverse table with wt_cf_vs_cp_cf
## The ebseq table is null.
## Used Bon Ferroni corrected t test(s) between columns.
## Used Bon Ferroni corrected t test(s) between columns.
## Used Bon Ferroni corrected t test(s) between columns.
## Adding venn plots for dt_wt.
## Limma expression coefficients for dt_wt; R^2: 0.996; equation: y = 1.02x - 0.0505
## Edger expression coefficients for dt_wt; R^2: 0.995; equation: y = 0.997x + 0.0933
## DESeq2 expression coefficients for dt_wt; R^2: 0.995; equation: y = 1x + 0.00313
## Adding venn plots for cp_wt.
## Limma expression coefficients for cp_wt; R^2: 0.997; equation: y = 1.01x + 0.117
## Edger expression coefficients for cp_wt; R^2: 0.996; equation: y = 0.999x + 0.0712
## DESeq2 expression coefficients for cp_wt; R^2: 0.996; equation: y = 1x + 0.00807
## Writing summary information.
## Performing save of excel/unfilt_nas_table.xlsx.
Let us pull the following subset from the DE tables for Volker; it should provide the set of proteins most obviously of interest, assuming the false negatives are not too severe.
This will hopefully find things which are sufficiently different from the deletion and complement samples to be interesting.
## Pull the deletion-vs-wt and complement-vs-wt tables; select proteins which
## move strongly in the deletion but much less in the complement.
dt_wt <- unfilt_nas_tables[["data"]][["dt_wt"]]
cp_wt <- unfilt_nas_tables[["data"]][["cp_wt"]]
## NOTE(review): this assumes dt_wt and cp_wt share the same row order --
## confirm, or index cp_wt by rownames(dt_wt) first.
## which() drops NA comparisons (from the NA-masked proteins) instead of
## letting them propagate into all-NA rows of the subset.
down_idx <- which(dt_wt[["deseq_logfc"]] <= -6 & cp_wt[["deseq_logfc"]] >= -3)
down_table <- dt_wt[down_idx, ]
up_idx <- which(dt_wt[["deseq_logfc"]] >= 10 & cp_wt[["deseq_logfc"]] <= 4)
up_table <- dt_wt[up_idx, ]
down_subset <- write_xls(
  data=down_table,
  excel=glue::glue("excel/de_{rundate}_more_down_delta.xlsx"))
## The log shows 'more_up_delta.xlsx' being saved, but the call writing
## up_table was missing from the code; add it so the up set is saved too.
up_subset <- write_xls(
  data=up_table,
  excel=glue::glue("excel/de_{rundate}_more_up_delta.xlsx"))
## Saving to: excel/de_20190822_more_down_delta.xlsx
## Saving to: excel/de_20190822_more_up_delta.xlsx
## Unless skip_load is set, record the hpgltools commit, save the session
## image next to the Rmd, and print the session info.
if (!isTRUE(get0("skip_load"))) {
  ## message() concatenates its arguments; no paste0() wrapper needed.
  message("This is hpgltools commit: ", get_git_commit())
  ## Spell out 'replacement=' fully; the original used 'replace=', which only
  ## works via partial argument matching and is flagged by lintr.
  this_save <- paste0(gsub(pattern="\\.Rmd", replacement="", x=rmd_file),
                      "-v", ver, ".rda.xz")
  message("Saving to ", this_save)
  tmp <- sm(saveme(filename=this_save))
  pander::pander(sessionInfo())
}
## If you wish to reproduce this exact build of hpgltools, invoke the following:
## > git clone http://github.com/abelew/hpgltools.git
## > git reset 8ca465bb9928ffe95082f64aed9cf64799bbf8e6
## This is hpgltools commit: Wed Jul 31 16:40:59 2019 -0400: 8ca465bb9928ffe95082f64aed9cf64799bbf8e6
## Saving to 03_swath2stats_20190718-v20190718.rda.xz
R version 3.6.0 (2019-04-26)
Platform: x86_64-pc-linux-gnu (64-bit)
locale: LC_CTYPE=en_US.UTF-8, LC_NUMERIC=C, LC_TIME=en_US.UTF-8, LC_COLLATE=en_US.UTF-8, LC_MONETARY=en_US.UTF-8, LC_MESSAGES=en_US.UTF-8, LC_PAPER=en_US.UTF-8, LC_NAME=C, LC_ADDRESS=C, LC_TELEPHONE=C, LC_MEASUREMENT=en_US.UTF-8 and LC_IDENTIFICATION=C
attached base packages: parallel, stats, graphics, grDevices, utils, datasets, methods and base
other attached packages: foreach(v.1.4.7), edgeR(v.3.26.7), variancePartition(v.1.14.0), SWATH2stats(v.1.13.5), testthat(v.2.2.1), hpgltools(v.1.0), Biobase(v.2.44.0) and BiocGenerics(v.0.30.0)
loaded via a namespace (and not attached): tidyselect(v.0.2.5), lme4(v.1.1-21), htmlwidgets(v.1.3), RSQLite(v.2.1.2), AnnotationDbi(v.1.46.0), grid(v.3.6.0), BiocParallel(v.1.18.1), Rtsne(v.0.15), devtools(v.2.1.0), munsell(v.0.5.0), codetools(v.0.2-16), preprocessCore(v.1.46.0), withr(v.2.1.2), colorspace(v.1.4-1), GOSemSim(v.2.10.0), knitr(v.1.24), rstudioapi(v.0.10), stats4(v.3.6.0), Vennerable(v.3.1.0.9000), robustbase(v.0.93-5), DOSE(v.3.10.2), labeling(v.0.3), urltools(v.1.7.3), GenomeInfoDbData(v.1.2.1), polyclip(v.1.10-0), bit64(v.0.9-7), farver(v.1.1.0), rprojroot(v.1.3-2), vctrs(v.0.2.0), xfun(v.0.8), R6(v.2.4.0), doParallel(v.1.0.15), GenomeInfoDb(v.1.20.0), locfit(v.1.5-9.1), bitops(v.1.0-6), fgsea(v.1.10.0), gridGraphics(v.0.4-1), DelayedArray(v.0.10.0), assertthat(v.0.2.1), scales(v.1.0.0), nnet(v.7.3-12), ggraph(v.1.0.2), enrichplot(v.1.4.0), gtable(v.0.3.0), sva(v.3.32.1), processx(v.3.4.1), rlang(v.0.4.0), zeallot(v.0.1.0), genefilter(v.1.66.0), splines(v.3.6.0), rtracklayer(v.1.44.2), lazyeval(v.0.2.2), acepack(v.1.4.1), checkmate(v.1.9.4), europepmc(v.0.3), yaml(v.2.2.0), reshape2(v.1.4.3), GenomicFeatures(v.1.36.4), backports(v.1.1.4), qvalue(v.2.16.0), Hmisc(v.4.2-0), RBGL(v.1.60.0), clusterProfiler(v.3.12.0), tools(v.3.6.0), usethis(v.1.5.1), ggplotify(v.0.0.4), ggplot2(v.3.2.1), gplots(v.3.0.1.1), RColorBrewer(v.1.1-2), blockmodeling(v.0.3.4), sessioninfo(v.1.1.1), ggridges(v.0.5.1), Rcpp(v.1.0.2), plyr(v.1.8.4), base64enc(v.0.1-3), progress(v.1.2.2), zlibbioc(v.1.30.0), purrr(v.0.3.2), RCurl(v.1.95-4.12), ps(v.1.3.0), prettyunits(v.1.0.2), rpart(v.4.1-15), viridis(v.0.5.1), cowplot(v.1.0.0), S4Vectors(v.0.22.0), SummarizedExperiment(v.1.14.1), ggrepel(v.0.8.1), cluster(v.2.1.0), colorRamps(v.2.3), fs(v.1.3.1), magrittr(v.1.5), data.table(v.1.12.2), DO.db(v.2.9), openxlsx(v.4.1.0.1), BRAIN(v.1.30.0), triebeard(v.0.3.0), matrixStats(v.0.54.0), pkgload(v.1.0.2), hms(v.0.5.0), evaluate(v.0.14), xtable(v.1.8-4), pbkrtest(v.0.4-7), 
XML(v.3.98-1.20), IRanges(v.2.18.1), gridExtra(v.2.3), compiler(v.3.6.0), biomaRt(v.2.40.3), tibble(v.2.1.3), KernSmooth(v.2.23-15), crayon(v.1.3.4), minqa(v.1.2.4), htmltools(v.0.3.6), mgcv(v.1.8-28), corpcor(v.1.6.9), snow(v.0.4-3), Formula(v.1.2-3), geneplotter(v.1.62.0), tidyr(v.0.8.3), DBI(v.1.0.0), tweenr(v.1.0.1), MASS(v.7.3-51.4), boot(v.1.3-23), Matrix(v.1.2-17), readr(v.1.3.1), cli(v.1.1.0), quadprog(v.1.5-7), gdata(v.2.18.0), igraph(v.1.2.4.1), GenomicRanges(v.1.36.0), pkgconfig(v.2.0.2), registry(v.0.5-1), rvcheck(v.0.1.3), GenomicAlignments(v.1.20.1), foreign(v.0.8-72), xml2(v.1.2.2), annotate(v.1.62.0), rngtools(v.1.4), pkgmaker(v.0.27), XVector(v.0.24.0), bibtex(v.0.4.2), doRNG(v.1.7.1), EBSeq(v.1.24.0), stringr(v.1.4.0), callr(v.3.3.1), PolynomF(v.2.0-2), digest(v.0.6.20), graph(v.1.62.0), Biostrings(v.2.52.0), rmarkdown(v.1.14), fastmatch(v.1.1-0), htmlTable(v.1.13.1), directlabels(v.2018.05.22), Rsamtools(v.2.0.0), gtools(v.3.8.1), nloptr(v.1.2.1), nlme(v.3.1-141), jsonlite(v.1.6), desc(v.1.2.0), viridisLite(v.0.3.0), limma(v.3.40.6), pillar(v.1.4.2), lattice(v.0.20-38), DEoptimR(v.1.0-8), httr(v.1.4.1), pkgbuild(v.1.0.4), survival(v.2.44-1.1), GO.db(v.3.8.2), glue(v.1.3.1), remotes(v.2.1.0), zip(v.2.0.3), UpSetR(v.1.4.0), iterators(v.1.0.12), pander(v.0.6.3), bit(v.1.1-14), ggforce(v.0.3.0), stringi(v.1.4.3), blob(v.1.2.0), DESeq2(v.1.24.0), doSNOW(v.1.0.18), latticeExtra(v.0.6-28), caTools(v.1.17.1.2), memoise(v.1.1.0) and dplyr(v.0.8.3)