115 Commits
v3.0 ... v4.2

Author SHA1 Message Date
eugenefischer
a5a17d1f76 Revert previous commit 2022-10-01 18:23:31 -05:00
eugenefischer
0f3ab0fdd7 Section link test 2022-10-01 18:22:55 -05:00
eugenefischer
01596ef43a Rename sections 2022-10-01 18:16:08 -05:00
eugenefischer
cda25a2c62 Update performance section and TODO 2022-10-01 18:12:33 -05:00
eugenefischer
bde6da3076 fix typo 2022-10-01 16:12:21 -05:00
eugenefischer
2eede214c0 fix typo 2022-10-01 16:11:32 -05:00
eugenefischer
98ce708825 Remove questionable claim, reorder simulation experiments 2022-10-01 15:46:22 -05:00
eugenefischer
e7e85a4542 Comment out questionable claim 2022-10-01 15:44:29 -05:00
eugenefischer
c0dd2d31f2 Update version number 2022-10-01 15:21:33 -05:00
eugenefischer
cf103c5223 Add flag to enable p-value calculation 2022-10-01 14:36:22 -05:00
eugenefischer
26f66fe139 Remove outdated comments 2022-10-01 14:35:35 -05:00
eugenefischer
89295777ef Update output example 2022-10-01 14:30:46 -05:00
eugenefischer
99c92e6eb5 Update TODO 2022-10-01 14:21:23 -05:00
eugenefischer
b82176517c Update TOC, command line options 2022-10-01 13:59:03 -05:00
eugenefischer
0657db5653 tyoo 2022-10-01 13:44:17 -05:00
eugenefischer
9f0ac227e2 Clarify steps and reasoning behind the algorithm 2022-10-01 13:43:14 -05:00
eugenefischer
54896bc47f Correct typo, remove redundant information 2022-10-01 13:01:44 -05:00
eugenefischer
b19a4d37c2 Update readme with newer results, new menu options 2022-10-01 13:00:33 -05:00
eugenefischer
457d643477 Make calculation of p-values optional, defaulting to off 2022-09-30 03:17:58 -05:00
eugenefischer
593dd6c60f Add sample cell filename, cell sample size, and sample plate size to metadata 2022-09-30 02:58:15 -05:00
eugenefischer
b8aeeb988f Add sequence dropout rate to metadata output 2022-09-30 00:33:41 -05:00
eugenefischer
b9b13fb75e Rename dropout rate flag 2022-09-29 23:58:08 -05:00
eugenefischer
289220e0d0 Remove statements about pre-filtering types. Can add that back if I ever actually parameterize that. 2022-09-29 22:10:42 -05:00
eugenefischer
19badac92b Correct misstatement of filter condition in Algorithm section 2022-09-29 18:32:42 -05:00
eugenefischer
633334a1b8 Update Theory section, add Contents and Algorithm section. 2022-09-29 18:30:07 -05:00
eugenefischer
e308e47578 Correct error in comments 2022-09-29 18:29:43 -05:00
eugenefischer
133984276f Change access modifiers and add count of wells removed to output 2022-09-29 16:03:10 -05:00
eugenefischer
ec6713a1c0 Implement filtering for wells with anomalous read counts 2022-09-29 16:03:10 -05:00
097590cf21 Add method to remove a well from the SequenceRecord (git committed as past self due to IDE misclick) 2022-09-29 16:03:10 -05:00
eugenefischer
f1e4c4f194 Remove duplicate output statements 2022-09-29 01:05:36 -05:00
eugenefischer
b6218c3ed3 update version 2022-09-29 00:53:11 -05:00
eugenefischer
756e5572b9 update readme 2022-09-29 00:00:19 -05:00
eugenefischer
c30167d5ec Change real sequence collision so it isn't biased toward sequences in the earlier wells. 2022-09-28 23:15:55 -05:00
eugenefischer
a19525f5bb update readme 2022-09-28 23:01:59 -05:00
eugenefischer
e5803defa3 Bug fix, add comments 2022-09-28 18:09:47 -05:00
eugenefischer
34dc2a5721 Add real sequence collision rate 2022-09-28 17:54:55 -05:00
eugenefischer
fd106a0d73 Add real sequence collision rate 2022-09-28 17:46:09 -05:00
eugenefischer
22faad3414 Add real sequence collision rate 2022-09-28 17:45:09 -05:00
eugenefischer
0b36e2b742 Rewrite countSequences to allow for collision with real sequences on misreads 2022-09-28 17:44:26 -05:00
eugenefischer
9dacd8cd34 Add real sequence collision rate 2022-09-28 17:43:21 -05:00
eugenefischer
89687fa849 Add real sequence collision rate, make fields final 2022-09-28 17:43:06 -05:00
eugenefischer
fb443fe958 Revert "Add getCell and getRandomCell methods"
This reverts commit adebe1542e.
2022-09-28 14:36:20 -05:00
eugenefischer
adebe1542e Add getCell and getRandomCell methods 2022-09-28 13:49:50 -05:00
eugenefischer
882fbfffc6 Purge old code 2022-09-28 13:40:13 -05:00
eugenefischer
a88cfb8b0d Add read counts for individual wells to graphml output 2022-09-28 13:38:38 -05:00
eugenefischer
deed98e79d Bugfix 2022-09-28 12:58:14 -05:00
eugenefischer
1a35600f50 Add method to get read count from individual wells 2022-09-28 12:57:45 -05:00
eugenefischer
856063529b Read depth simulation is now compatible with plate caching 2022-09-28 12:47:00 -05:00
eugenefischer
b7c86f20b3 Add read depth attributes to graphml output 2022-09-28 03:01:52 -05:00
eugenefischer
3a47efd361 Update TODO 2022-09-28 03:01:03 -05:00
eugenefischer
58bb04c431 Remove redundant toString() calls 2022-09-28 02:08:17 -05:00
eugenefischer
610da68262 Refactor Vertex class to use SequenceRecords 2022-09-28 00:58:44 -05:00
eugenefischer
9973473cc6 Make serializable and implement getWellOccupancies method 2022-09-28 00:58:02 -05:00
eugenefischer
8781afd74c Reorder conditional 2022-09-28 00:57:06 -05:00
eugenefischer
88b6c79caa Refactor to simplify graph creation code 2022-09-28 00:07:59 -05:00
eugenefischer
35a519d499 update TODO 2022-09-27 22:20:57 -05:00
eugenefischer
5bd1e568a6 update TODO 2022-09-27 15:08:16 -05:00
eugenefischer
4ad1979c18 Add read depth simulation options to CLI 2022-09-27 15:05:50 -05:00
eugenefischer
423c9d5c93 Add read depth simulation options to CLI 2022-09-27 14:35:55 -05:00
eugenefischer
7c3c95ab4b update TODO in readme 2022-09-27 14:11:21 -05:00
eugenefischer
d71a99555c clean up metadata 2022-09-27 12:15:12 -05:00
eugenefischer
2bf2a9f5f7 Add comments 2022-09-27 11:51:51 -05:00
eugenefischer
810abdb705 Add read depth parameters to output metadata 2022-09-27 11:13:12 -05:00
eugenefischer
f7b3c133bf Add filtering based on occupancy/read count discrepancy 2022-09-26 23:39:18 -05:00
eugenefischer
14fcfe1ff3 spacing 2022-09-26 23:38:56 -05:00
eugenefischer
70fec95a00 Bug fix 2022-09-26 23:17:18 -05:00
eugenefischer
077af3b46e Clear plate in memory when simulating read depth 2022-09-26 23:17:10 -05:00
eugenefischer
db99c74810 Rework read depth simulation to allow edge weight calculations to work as expected. (This changes sample plate in memory, so caching the sample plate is incompatible) 2022-09-26 23:03:23 -05:00
eugenefischer
13a1af1f71 placeholder values until CLI is updated to support read depth simulation 2022-09-26 19:43:29 -05:00
eugenefischer
199c81f983 Implement read count for vertices 2022-09-26 19:42:19 -05:00
eugenefischer
19a2a35f07 Refactor plate assay methods to use maps passed as parameters rather than returning maps 2022-09-26 17:00:25 -05:00
eugenefischer
36c628cde5 Add code to simulate read depth 2022-09-26 16:52:56 -05:00
eugenefischer
1ddac63b0a Add exception handling 2022-09-26 14:28:35 -05:00
eugenefischer
e795b4cdd0 Add read depth option to interface 2022-09-26 14:25:47 -05:00
eugenefischer
60cf6775c2 notes toward command line read depth option 2022-09-26 14:25:30 -05:00
eugenefischer
8a8c89c9ba revert options menu 2022-09-26 14:24:58 -05:00
eugenefischer
86371668d5 Add menu option to activate simulation of read depth and sequence read errors 2022-09-26 13:47:19 -05:00
eugenefischer
d81ab25a68 Comment: need to update this when read count is implemented 2022-09-26 13:46:53 -05:00
eugenefischer
02c8e6aacb Refactor sequences to be strings instead of integers, to make simulating read errors easier 2022-09-26 13:37:48 -05:00
eugenefischer
f84dfb2b4b Method stub for simulating read depth 2022-09-26 00:43:13 -05:00
eugenefischer
184278b72e Add fields for simulating read depth. Also a priority queue for lookback auctions 2022-09-26 00:42:55 -05:00
eugenefischer
489369f533 Add flag to simulate read depth 2022-09-26 00:42:23 -05:00
eugenefischer
fbee591273 Change indentation 2022-09-25 22:36:02 -05:00
eugenefischer
603a999b59 Update readme 2022-09-25 22:35:52 -05:00
eugenefischer
c3df4b12ab Update readme with read depth TODO 2022-09-25 21:50:59 -05:00
eugenefischer
d1a56c3578 Hand-merge of some things from Dev_Vertex branch that didn't make it in for some reason 2022-09-25 19:07:25 -05:00
eugenefischer
16daf02dd6 Merge branch 'Dev_Vertex'
# Conflicts:
#	src/main/java/GraphModificationFunctions.java
#	src/main/java/GraphWithMapData.java
#	src/main/java/Simulator.java
#	src/main/java/Vertex.java
2022-09-25 18:33:26 -05:00
eugenefischer
04a077da2e update Readme 2022-09-25 18:24:12 -05:00
eugenefischer
740835f814 fix typo 2022-09-25 17:47:07 -05:00
eugenefischer
8a77d53f1f Output sequence counts before and after pre-filtering (currently pre-filtering only sequences present in all wells) 2022-09-25 17:20:50 -05:00
eugenefischer
58fa140ee5 add comments 2022-09-25 16:10:17 -05:00
eugenefischer
475bbf3107 Sort vertex lists by vertex label before making adjacency matrix 2022-09-25 15:54:28 -05:00
eugenefischer
4f2fa4cbbe Pre-filter saturating sequences only. Retaining singletons seems to improve matching accuracy in high sample rate test (well populations 10% of total cell sample size) 2022-09-25 15:19:56 -05:00
eugenefischer
58d418e44b Pre-filter saturating sequences only. Retaining singletons seems to improve matching accuracy in high sample rate test (well populations 10% of total cell sample size) 2022-09-25 15:06:46 -05:00
eugenefischer
1971a96467 Remove pre-filtering of singleton and saturating sequences 2022-09-25 14:55:43 -05:00
eugenefischer
e699795521 Revert "by-hand merge of needed code from custom vertex branch"
This reverts commit 29b844afd2.
2022-09-25 14:34:31 -05:00
eugenefischer
bd6d010b0b Revert "update TODO"
This reverts commit a054c0c20a.
2022-09-25 14:34:31 -05:00
eugenefischer
61d1eb3eb1 Revert "Reword output message"
This reverts commit 63317f2aa0.
2022-09-25 14:34:31 -05:00
eugenefischer
cb41b45204 Revert "Reword option menu item"
This reverts commit 06e72314b0.
2022-09-25 14:34:31 -05:00
eugenefischer
a84d2e1bfe Revert "Add comment on map data encodng"
This reverts commit 73c83bf35d.
2022-09-25 14:34:31 -05:00
eugenefischer
7b61d2c0d7 Revert "update version number"
This reverts commit e4e5a1f979.
2022-09-25 14:34:31 -05:00
eugenefischer
56454417c0 Revert "Restore pre-filtering of singleton and saturating sequences"
This reverts commit 5c03909a11.
2022-09-25 14:34:31 -05:00
eugenefischer
8ee1c5903e Merge branch 'master' into Dev_Vertex
# Conflicts:
#	src/main/java/GraphMLFileReader.java
#	src/main/java/InteractiveInterface.java
#	src/main/java/Simulator.java
2022-09-25 14:18:56 -05:00
eugenefischer
5c03909a11 Restore pre-filtering of singleton and saturating sequences 2022-09-22 01:39:13 -05:00
eugenefischer
dea4972927 remove prefiltering of singletons and saturating sequences 2022-09-21 16:09:08 -05:00
eugenefischer
9ae38bf247 Fix bug in correct match counter 2022-09-21 15:59:23 -05:00
817fe51708 Code cleanup 2022-02-26 09:56:46 -06:00
1ea68045ce Refactor cdr3 matching to use new Vertex class 2022-02-26 09:49:16 -06:00
75b2aa9553 testing graph attributes 2022-02-26 08:58:52 -06:00
b3dc10f287 add graph attributes to graphml writer 2022-02-26 08:15:48 -06:00
fb8d8d8785 make heap type an enum 2022-02-26 08:15:31 -06:00
ab437512e9 make Vertex serializable 2022-02-26 07:45:36 -06:00
7b03a3cce8 bugfix 2022-02-26 07:35:34 -06:00
f032d3e852 rewrite GraphML importer/exporter 2022-02-26 07:34:07 -06:00
b604b1d3cd Changing graph to use Vertex class 2022-02-26 06:19:08 -06:00
18 changed files with 1082 additions and 420 deletions

420
readme.md
View File

@@ -1,5 +1,25 @@
# BiGpairSEQ SIMULATOR
## CONTENTS
1. ABOUT
2. THEORY
3. THE BiGpairSEQ ALGORITHM
4. USAGE
1. RUNNING THE PROGRAM
2. COMMAND LINE OPTIONS
3. INTERACTIVE INTERFACE
4. INPUT/OUTPUT
1. Cell Sample Files
2. Sample Plate Files
3. Graph/Data Files
4. Matching Results Files
5. RESULTS
1. SAMPLE PLATES WITH VARYING NUMBERS OF CELLS PER WELL
2. SIMULATING EXPERIMENTS FROM pairSEQ PAPER
6. TODO
7. CITATIONS
8. ACKNOWLEDGEMENTS
9. AUTHOR
## ABOUT
@@ -8,26 +28,77 @@ of the pairSEQ algorithm (Howie, et al. 2015) for pairing T cell receptor sequen
## THEORY
Unlike pairSEQ, which calculates p-values for every TCR alpha/beta overlap and compares
against a null distribution, BiGpairSEQ does not do any statistical calculations
directly.
T cell receptors (TCRs) are encoded by pairs of sequences, alpha sequences (TCRAs) and beta sequences (TCRBs). These sequences
are extremely diverse; to the first approximation, this pair of sequences uniquely identifies a line of T cells.
As described in the original 2015 paper, pairSEQ pairs TCRAs and TCRBs by distributing a
sample of T cells across a 96-well sample plate, then sequencing the contents of each well. It then calculates p-values for
every TCRA/TCRB sequence overlap and compares that against a null distribution, to find the most statistically probable pairings.
BiGpairSEQ uses the same fundamental idea of using occupancy overlap to pair TCR sequences, but unlike pairSEQ it
does not require performing any statistical calculations at all. Instead, BiGpairSEQ uses graph theory methods which
produce provably optimal solutions.
BiGpairSEQ creates a [weighted bipartite graph](https://en.wikipedia.org/wiki/Bipartite_graph) representing the sample plate.
The distinct TCRA and TCRB sequences form the two sets of vertices. Every TCRA/TCRB pair that share a well
are connected by an edge, with the edge weight set to the number of wells in which both sequences appear.
(Sequences present in *all* wells are filtered out prior to creating the graph, as there is no signal in their occupancy pattern.)
The problem of pairing TCRA/TCRB sequences thus reduces to the "assignment problem" of finding a maximum weight
matching on a bipartite graph--the subset of vertex-disjoint edges whose weights sum to the maximum possible value.
The distinct TCRA and TCRB sequences form the two sets of vertices. Every TCRA/TCRB pair that share a well on the sample plate
are connected by an edge in the graph, with the edge weight set to the number of wells in which both sequences appear. The vertices
themselves are labeled with the occupancy data for the individual sequences they represent, which is useful for pre-filtering
before finding a maximum weight matching. Such a graph fully encodes the distribution data from the sample plate.
This is a well-studied combinatorial optimization problem, with many known solutions.
The most efficient algorithm known to the author for maximum weight matching of a bipartite graph with strictly integral
weights is from Duan and Su (2012). For a graph with m edges, n vertices per side, and maximum integer edge weight N,
their algorithm runs in **O(m sqrt(n) log(N))** time. As the graph representation of a pairSEQ experiment is
bipartite with integer weights, this algorithm is ideal for BiGpairSEQ.
The problem of pairing TCRA/TCRB sequences thus reduces to the [assignment problem](https://en.wikipedia.org/wiki/Assignment_problem) of finding a maximum weight
matching (MWM) on a bipartite graph--the subset of vertex-disjoint edges whose weights sum to the maximum possible value.
Unfortunately, it's a fairly new algorithm, and not yet implemented by the graph theory library used in this simulator.
So this program instead uses the Fibonacci heap-based algorithm of Fredman and Tarjan (1987), which has a worst-case
runtime of **O(n (n log(n) + m))**. The algorithm is implemented as described in Mehlhorn and Näher (1999).
This is a well-studied combinatorial optimization problem, with many known algorithms that produce
provably-optimal solutions. The most theoretically efficient algorithm known to the author for maximum weight matching of a bipartite
graph with strictly integral weights is from Duan and Su (2012). For a graph with m edges, n vertices per side,
and maximum integer edge weight N, their algorithm runs in **O(m sqrt(n) log(N))** time. As the graph representation of
a pairSEQ experiment is bipartite with integer weights, this algorithm seems ideal for BiGpairSEQ. Unfortunately, it's a
fairly new algorithm, and not yet implemented by the graph theory library used in this simulator (JGraphT), nor has the author had
time to implement it himself.
There have been some studies which show that [auction algorithms](https://en.wikipedia.org/wiki/Auction_algorithm) for the assignment problem can have superior performance in
real-world implementations, due to their simplicity, than more complex algorithms with better theoretical asymptotic
performance. But, again, there are no such algorithms implemented by JGraphT, nor has the author yet had time to implement one.
So this program instead uses the [Fibonacci heap](https://en.wikipedia.org/wiki/Fibonacci_heap) based algorithm of Fredman and Tarjan (1987) (essentially
[the Hungarian algorithm](https://en.wikipedia.org/wiki/Hungarian_algorithm) augmented with a more efficient priority queue) which has a worst-case
runtime of **O(n (n log(n) + m))**. The algorithm is implemented as described in Mehlhorn and Näher (1999). (The simulator
allows the substitution of a [pairing heap](https://en.wikipedia.org/wiki/Pairing_heap) for a Fibonacci heap, though the relative performance difference of the two
has not yet been thoroughly tested.)
One possible advantage of this less efficient algorithm is that the Hungarian algorithm and its variations work with both the balanced and the unbalanced assignment problem
(that is, cases where both sides of the bipartite graph have the same number of vertices and those in which they don't.)
Many other MWM algorithms only work for the balanced assignment problem. While pairSEQ-style experiments should theoretically
be balanced assignment problems, in practice sequence dropout can cause them to be unbalanced. The unbalanced case
*can* be reduced to the balanced case, but doing so involves doubling the graph size. Since the current implementation uses only
the Hungarian algorithm, graph doubling--which could be challenging with the computational resources available to the
author--has not yet been necessary.
The relative time/space efficiencies of BiGpairSEQ when backed by different MWM algorithms remains an open problem.
## THE BiGpairSEQ ALGORITHM
1. Sequence a sample plate of T cells as in pairSEQ.
2. Pre-filter the sequence data to reduce error and minimize the size of the necessary graph.
1. *Saturating sequence filter*: remove any sequences present in all wells on the sample plate, as there is no signal in the occupancy data of saturating sequences (and each saturating sequence will have an edge to every vertex on the opposite side of the graph, vastly increasing the total graph size).
2. *Non-existent sequence filter*: sequencing misreads can pollute the data from the sample plate with non-existent sequences. These can be identified by the discrepancy between their occupancy and their total read count. Assuming sequences are read correctly at least half the time, then a sequence's total read count (R) should be at least half the well occupancy of that sequence (O) times the read depth of the sequencing run (D). Remove any sequences for which R < (O * D) / 2.
3. *Misidentified sequence filter*: sequencing misreads can cause one real sequence to be misidentified as a different real sequence. This should be fairly infrequent, but is a problem if it skews a sequence's overall occupancy pattern by causing the sequence to seem to be in a well where it's not. This can be detected by looking for discrepancies in a sequence's per-well read count. On average, the read count for a sequence in an individual well (r) should be equal to its total read count (R) divided by its total well occupancy (O). Remove from the list of wells occupied by a sequence any wells for which r < R / (2 * O).
3. Encode the occupancy data from the sample plate as a weighted bipartite graph, where one set of vertices represent the distinct TCRAs and the other set represents distinct TCRBs. Between any TCRA and TCRB that share a well, draw an edge. Assign that edge a weight equal to the total number of wells shared by both sequences.
4. Find a maximum weight matching of the bipartite graph, using any [MWM algorithm](https://en.wikipedia.org/wiki/Assignment_problem#Algorithms) that produces a provably optimal result.
* If desired, restrict the matching to a subset of the graph. (Example: restricting matching attempts to cases where the occupancy overlap is 4 or more wells--that is, edges with weight >= 4.0.) See below for discussion of why this might be desirable.
5. The resultant matching represents the likeliest TCRA/TCRB sequence pairs based on the occupancy pattern of the sample plate.
It is important to note that a maximum weight matching is not necessarily unique. If two different sets of vertex-disjoint edges
sum to the same maximal weight, then a MWM algorithm might find either one of them.
For example, consider a well that contains four rare sequences found only in that well, two TCRAs and two TCRBs.
In the graph, both of those TCRAs would have edges to both TCRBs (and to others of course, but since those edges will have a weight of 1.0,
they are unlikely to be paired in a MWM to sequences with total occupancy of more than one well). If these four sequences
represent two unique T cells, then only one of the two possible pairings between these sequences is correct. But both
the correct and incorrect pairing will add 2.0 to the total graph weight, so either one could be part of a maximum weight matching.
It is to minimize the number of possible equivalent-weight matchings that one might restrict the algorithm to examining
only a subset of the graph, as described in step 4 above.
## USAGE
@@ -48,11 +119,117 @@ For example, to run the program with 32 gigabytes of memory, use the command:
`java -Xmx32G -jar BiGpairSEQ_Sim.jar`
There are a number of command line options, to allow the program to be used in shell scripts. For a full list,
use the `-help` flag:
### COMMAND LINE OPTIONS
There are a number of command line options, to allow the program to be used in shell scripts. These can be viewed with
the `-help` flag:
`java -jar BiGpairSEQ_Sim.jar -help`
```
usage: BiGpairSEQ_Sim.jar
-cells,--make-cells Makes a cell sample file of distinct T cells
-graph,--make-graph Makes a graph/data file. Requires a cell sample
file and a sample plate file
-help Displays this help menu
-match,--match-cdr3 Matches CDR3s. Requires a graph/data file.
-plate,--make-plate Makes a sample plate file. Requires a cell sample
file.
-version Prints the program version number to stdout
usage: BiGpairSEQ_Sim.jar -cells
-d,--diversity-factor <factor> The factor by which unique CDR3s
outnumber unique CDR1s
-n,--num-cells <number> The number of distinct cells to generate
-o,--output-file <filename> Name of output file
usage: BiGpairSEQ_Sim.jar -plate
-c,--cell-file <filename> The cell sample file to use
-d,--dropout-rate <rate> The sequence dropout rate due to
amplification error. (0.0 - 1.0)
-exponential Use an exponential distribution for cell
sample
-gaussian Use a Gaussian distribution for cell sample
-lambda <value> If using -exponential flag, lambda value
for distribution
-o,--output-file <filename> Name of output file
-poisson Use a Poisson distribution for cell sample
-pop <number [number]...> The well populations for each section of
the sample plate. There will be as many
sections as there are populations given.
-random <min> <max> Randomize well populations on sample plate.
Takes two arguments: the minimum possible
population and the maximum possible
population.
-stddev <value> If using -gaussian flag, standard deviation
for distrbution
-w,--wells <number> The number of wells on the sample plate
usage: BiGpairSEQ_Sim.jar -graph
-c,--cell-file <filename> Cell sample file to use for
checking pairing accuracy
-err,--read-error-prob <prob> (Optional) The probability that
a sequence will be misread. (0.0
- 1.0)
-errcoll,--error-collision-prob <prob> (Optional) The probability that
two misreads will produce the
same spurious sequence. (0.0 -
1.0)
-graphml (Optional) Output GraphML file
-nb,--no-binary (Optional) Don't output
serialized binary file
-o,--output-file <filename> Name of output file
-p,--plate-filename <filename> Sample plate file from which to
construct graph
-rd,--read-depth <depth> (Optional) The number of times
to read each sequence.
-realcoll,--real-collision-prob <prob> (Optional) The probability that
a sequence will be misread as
another real sequence. (Only
applies to unique misreads;
after this has happened once,
future error collisions could
produce the real sequence again)
(0.0 - 1.0)
usage: BiGpairSEQ_Sim.jar -match
-g,--graph-file <filename> The graph/data file to use
-max <number> The maximum number of shared wells to
attempt to match a sequence pair
-maxdiff <number> (Optional) The maximum difference in total
occupancy between two sequences to attempt
matching.
-min <number> The minimum number of shared wells to
attempt to match a sequence pair
-minpct <percent> (Optional) The minimum percentage of a
sequence's total occupancy shared by
another sequence to attempt matching. (0 -
100)
-o,--output-file <filename> (Optional) Name of output the output file.
If not present, no file will be written.
--print-alphas (Optional) Print the number of distinct
alpha sequences to stdout.
--print-attempt (Optional) Print the pairing attempt rate
to stdout
--print-betas (Optional) Print the number of distinct
beta sequences to stdout.
--print-correct (Optional) Print the number of correct
pairs to stdout
--print-error (Optional) Print the pairing error rate to
stdout
--print-incorrect (Optional) Print the number of incorrect
pairs to stdout
--print-metadata (Optional) Print a full summary of the
matching results to stdout.
--print-time (Optional) Print the total simulation time
to stdout.
-pv,--p-value (Optional) Calculate p-values for sequence
pairs.
```
### INTERACTIVE INTERFACE
If no command line arguments are given, BiGpairSEQ_Sim will launch with an interactive, menu-driven CLI for
generating files and simulating TCR pairing. The main menu looks like this:
@@ -79,7 +256,8 @@ By default, the Options menu looks like this:
3) Turn on graph/data file caching
4) Turn off serialized binary graph output
5) Turn on GraphML graph output
6) Maximum weight matching algorithm options
6) Turn on calculation of p-values
7) Maximum weight matching algorithm options
0) Return to main menu
```
@@ -96,7 +274,7 @@ These files are often generated in sequence. When entering filenames, it is not
(.csv or .ser). When reading or writing files, the program will automatically add the correct extension to any filename
without one.
To save file I/O time, the most recent instance of each of these four
To save file I/O time when using the interactive interface, the most recent instance of each of these four
files either generated or read from disk can be cached in program memory. When caching is active, subsequent uses of the
same data file won't need to be read in again until another file of that type is used or generated,
or caching is turned off for that file type. The program checks whether it needs to update its cached data by comparing
@@ -116,7 +294,7 @@ turned on in the Options menu. By default, GraphML output is OFF.
Cell Sample files consist of any number of distinct "T cells." Every cell contains
four sequences: Alpha CDR3, Beta CDR3, Alpha CDR1, Beta CDR1. The sequences are represented by
random integers. CDR3 Alpha and Beta sequences are all unique within a given Cell Sample file. CDR1 Alpha and Beta sequences
are not necessarily unique; the relative diversity can be set when making the file.
are not necessarily unique; the relative diversity of CDR1s with respect to CDR3s can be set when making the file.
(Note: though cells still have CDR1 sequences, matching of CDR1s is currently awaiting re-implementation.)
@@ -133,7 +311,7 @@ Structure:
| Alpha CDR3 | Beta CDR3 | Alpha CDR1 | Beta CDR1 |
|---|---|---|---|
|unique number|unique number|number|number|
| ... | ... |... | ... |
---
#### Sample Plate Files
@@ -142,7 +320,8 @@ described above). The wells are filled randomly from a Cell Sample file, accordi
frequency distribution. Additionally, every individual sequence within each cell may, with some
given dropout probability, be omitted from the file; this simulates the effect of amplification errors
prior to sequencing. Plates can also be partitioned into any number of sections, each of which can have a
different concentration of T cells per well.
different concentration of T cells per well. Alternatively, the number of T cells in each well can be randomized between
given minimum and maximum population values.
Options when making a Sample Plate file:
* Cell Sample file to use
@@ -152,7 +331,6 @@ Options when making a Sample Plate file:
* Standard deviation size
* Exponential
* Lambda value
* *(Based on the slope of the graph in Figure 4C of the pairSEQ paper, the distribution of the original experiment was approximately exponential with a lambda ~0.6. (Howie, et al. 2015))*
* Total number of wells on the plate
* Well populations random or fixed
* If random, minimum and maximum population sizes
@@ -160,7 +338,7 @@ Options when making a Sample Plate file:
* Number of sections on plate
* Number of T cells per well
* per section, if more than one section
* Dropout rate
* Sequence dropout rate
Files are in CSV format. There are no header labels. Every row represents a well.
Every value represents an individual cell, containing four sequences, depicted as an array string:
@@ -199,7 +377,12 @@ then use it for multiple different BiGpairSEQ simulations.
Options for creating a Graph/Data file:
* The Cell Sample file to use
* The Sample Plate file to use. (This must have been generated from the selected Cell Sample file.)
* The Sample Plate file to use (This must have been generated from the selected Cell Sample file.)
* Whether to simulate sequencing read depth. If simulated:
* The read depth (The number of times each sequence is read)
* The read error rate (The probability a sequence is misread)
* The error collision rate (The probability two misreads produce the same spurious sequence)
* The real sequence collision rate (The probability that a misread will produce a different, real sequence from the sample plate. Only applies to new misreads; once an error of this type has occurred, its likelihood of occurring again is dominated by the error collision probability.)
These files do not have a human-readable structure, and are not portable to other programs.
@@ -207,8 +390,8 @@ These files do not have a human-readable structure, and are not portable to othe
For portability of graph data to other software, turn on [GraphML](http://graphml.graphdrawing.org/index.html) output
in the Options menu in interactive mode, or use the `-graphml` command line argument. This will produce a .graphml file
for the weighted graph, with vertex attributes for sequence, type, and occupancy data. This graph contains all the data
necessary for the BiGpairSEQ matching algorithm. It does not include the data to measure pairing accuracy; for that,
for the weighted graph, with vertex attributes for sequence, type, total occupancy, total read count, and the read count for every individual occupied well.
This graph contains all the data necessary for the BiGpairSEQ matching algorithm. It does not include the data to measure pairing accuracy; for that,
compare the matching results to the original Cell Sample .csv file.
---
@@ -216,7 +399,7 @@ compare the matching results to the original Cell Sample .csv file.
#### Matching Results Files
Matching results files consist of the results of a BiGpairSEQ matching simulation. Making them requires a serialized
binary Graph/Data file (.ser). (Because .graphML files are larger than .ser files, BiGpairSEQ_Sim supports .graphML
output only. Graph/data input must use a serialized binary.)
output only. Graph input must use a serialized binary.)
Matching results files are in CSV format. Rows are sequence pairings with extra relevant data. Columns are pairing-specific details.
Metadata about the matching simulation is included as comments. Comments are preceded by `#`.
@@ -234,56 +417,65 @@ Options when running a BiGpairSEQ simulation of CDR3 alpha/beta matching:
Example output:
```
# Source Sample Plate file: 4MilCellsPlate.csv
# Source Graph and Data file: 4MilCellsPlateGraph.ser
# T cell counts in sample plate wells: 30000
# Total alphas found: 11813
# Total betas found: 11808
# High overlap threshold: 94
# Low overlap threshold: 3
# Minimum overlap percent: 0
# Maximum occupancy difference: 96
# Pairing attempt rate: 0.438
# Correct pairings: 5151
# Incorrect pairings: 18
# Pairing error rate: 0.00348
# Simulation time: 862 seconds
# cell sample filename: 8MilCells.csv
# cell sample size: 8000000
# sample plate filename: 8MilCells_Plate.csv
# sample plate well count: 96
# sequence dropout rate: 0.1
# graph filename: 8MilGraph_rd2
# MWM algorithm type: LEDA book with heap: FIBONACCI
# matching weight: 218017.0
# well populations: 4000
# sequence read depth: 100
# sequence read error rate: 0.01
# read error collision rate: 0.1
# real sequence collision rate: 0.05
# total alphas read from plate: 323711
# total betas read from plate: 323981
# alphas in graph (after pre-filtering): 11707
# betas in graph (after pre-filtering): 11705
# high overlap threshold for pairing: 95
# low overlap threshold for pairing: 3
# minimum overlap percent for pairing: 0
# maximum occupancy difference for pairing: 2147483647
# pairing attempt rate: 0.716
# correct pairing count: 8373
# incorrect pairing count: 7
# pairing error rate: 0.000835
# time to generate graph (seconds): 293
# time to pair sequences (seconds): 1,416
# total simulation time (seconds): 1,709
```
| Alpha | Alpha well count | Beta | Beta well count | Overlap count | Matched Correctly? | P-value |
|---|---|---|---|---|---|---|
|5242972|17|1571520|18|17|true|1.41E-18|
|5161027|18|2072219|18|18|true|7.31E-20|
|4145198|33|1064455|30|29|true|2.65E-21|
|7700582|18|112748|18|18|true|7.31E-20|
|10258642|73|1172093|72|70.0|true|4.19E-21|
|6186865|34|4290363|37|34.0|true|4.56E-26|
|10222686|70|11044018|72|68.0|true|9.55E-25|
|5338100|75|2422988|76|74.0|true|4.57E-22|
|12363907|33|6569852|35|33.0|true|5.70E-26|
|...|...|...|...|...|...|...|
---
**NOTE: The p-values in the output are not used for matching**—they aren't part of the BiGpairSEQ algorithm at all.
P-values are calculated *after* BiGpairSEQ matching is completed, for purposes of comparison only,
using the (2021 corrected) formula from the original pairSEQ paper. (Howie, et al. 2015)
**NOTE: The p-values in the sample output above are not used for matching**—they aren't part of the BiGpairSEQ algorithm at all.
P-values (if enabled in the interactive menu options or by using the -pv flag on the command line) are calculated *after*
BiGpairSEQ matching is completed, for purposes of comparison with pairSEQ only, using the (corrected) formula from the
original pairSEQ paper. (Howie, et al. 2015) Calculation of p-values is off by default to reduce processing time.
## PERFORMANCE
## RESULTS
On a home computer with a Ryzen 5600X CPU, 64GB of 3200MHz DDR4 RAM (half of which was allocated to the Java Virtual Machine), and a PCIe 3.0 SSD, running Linux Mint 20.3 Edge (5.13 kernel),
the author ran a BiGpairSEQ simulation of a 96-well sample plate with 30,000 T cells/well comprising ~11,800 alphas and betas,
taken from a sample of 4,000,000 distinct cells with an exponential frequency distribution (lambda 0.6).
Several BiGpairSEQ simulations were performed on a home computer with the following specs:
With min/max occupancy threshold of 3 and 94 wells for matching, and no other pre-filtering, BiGpairSEQ identified 5,151
correct pairings and 18 incorrect pairings, for an accuracy of 99.652%.
* Ryzen 5600X CPU
* 128GB of 3200MHz DDR4 RAM
* 2TB PCIe 3.0 SSD
* Linux Mint 21 (5.15 kernel)
The total simulation time was 14'22". If intermediate results were held in memory, this would be equivalent to the total elapsed time.
Since this implementation of BiGpairSEQ writes intermediate results to disk (to improve the efficiency of *repeated* simulations
with different filtering options), the actual elapsed time was greater. File I/O time was not measured, but took
slightly less time than the simulation itself. Real elapsed time from start to finish was under 30 minutes.
As mentioned in the theory section, performance could be improved by implementing a more efficient algorithm for finding
the maximum weighted matching.
## BEHAVIOR WITH RANDOMIZED WELL POPULATIONS
### SAMPLE PLATES WITH VARYING NUMBERS OF CELLS PER WELL
NOTE: these results were obtained with an earlier version of BiGpairSEQ_Sim, and should be re-run with the current version.
However, the observed behavior is not expected to change.
A series of BiGpairSEQ simulations were conducted using a cell sample file of 3.5 million unique T cells. From these cells,
10 sample plate files were created. All of these sample plates had 96 wells, used an exponential distribution with a lambda of 0.6, and
@@ -328,6 +520,70 @@ The average results for the randomized plates are closest to the constant plate
This and several other tests indicate that BiGpairSEQ treats a sample plate with a highly variable number of T cells/well
roughly as though it had a constant well population equal to the plate's average well population.
### SIMULATING EXPERIMENTS FROM THE 2015 pairSEQ PAPER
#### Experiment 1
This simulation was an attempt to replicate the conditions of experiment 1 from the 2015 pairSEQ paper: a matching was found for a
96-well sample plate with 4,000 T cells/well, taken from a sample of 8,400,000
distinct cells sampled with an exponential frequency distribution. Examination of Figure 4C from the paper seems to show the points
(-5, 4) and (-4.5, 3.3) on the line at the boundary of the shaded region, so a lambda value of 1.4 was used for the
exponential distribution.
The sequence dropout rate was 10%, as the analysis in the paper concluded that most TCR
sequences "have less than a 10% chance of going unobserved." (Howie, et al. 2015) Given this choice of 10%, the simulated
sample plate is likely to have more sequence dropout, and thus greater error, than the real experiment.
The original paper does not contain (or the author of this document failed to identify) information on sequencing depth,
read error probability, or the probabilities of different kinds of read error collisions. As the pre-filtering of BiGpairSEQ
has successfully filtered out all such errors for any reasonable error rates the author has yet tested, this simulation was
done without simulating any sequencing errors, to reduce the processing time.
This simulation was performed 5 times with min/max occupancy thresholds of 3 and 95 wells respectively for matching.
| |Run 1|Run 2|Run 3|Run 4|Run 5| Average|
|---|---|---|---|---|---|---|
|Total pairs|4398|4420|4404|4409|4414|4409|
|Correct pairs|4322|4313|4337|4336|4339|4329.4|
|Incorrect pairs|76|107|67|73|75|79.6|
|Error rate|0.0173|0.0242|0.0152|0.0166|0.0170|0.018|
|Simulation time (seconds)|697|484|466|473|463|516.6|
The experiment in the original paper called 4143 pairs with a false discovery rate of 0.01.
Given the roughness of the estimation for the cell frequency distribution of the original experiment and the likely
higher rate of sequence dropout in the simulation, these simulated results match the real experiment fairly well.
#### Experiment 3
To simulate experiment 3 from the original paper, a matching was made for a 96-well sample plate with 160,000 T cells/well,
taken from a sample of 4.5 million distinct T cells sampled with an exponential frequency distribution (lambda 1.4). The
sequence dropout rate was again 10%, and no sequencing errors were simulated. Once again, deviation from the original
experiment is expected due to the roughness of the estimated frequency distribution, and due to the high sequence dropout
rate.
Results metadata:
```
# total alphas read from plate: 6929
# total betas read from plate: 6939
# alphas in graph (after pre-filtering): 4452
# betas in graph (after pre-filtering): 4461
# high overlap threshold for pairing: 95
# low overlap threshold for pairing: 3
# minimum overlap percent for pairing: 0
# maximum occupancy difference for pairing: 100
# pairing attempt rate: 0.767
# correct pairing count: 3233
# incorrect pairing count: 182
# pairing error rate: 0.0533
# time to generate graph (seconds): 40
# time to pair sequences (seconds): 230
# total simulation time (seconds): 270
```
The simulation only found 6929 distinct TCRAs and 6939 TCRBs on the sample plate, orders of magnitude fewer than the number of
pairs called in the pairSEQ experiment. These results show that at very high sampling depths, the differences in the
underlying frequency distribution drastically affect the results. The real distribution clearly has a much longer "tail"
than the simulated exponential distribution. Implementing a way to exert finer control over the sampling distribution from
the file of distinct cells may enable better simulated replication of this experiment.
## TODO
* ~~Try invoking GC at end of workloads to reduce paging to disk~~ DONE
@@ -340,28 +596,44 @@ roughly as though it had a constant well population equal to the plate's average
* ~~Add controllable heap-type parameter?~~
* Parameter implemented. Fibonacci heap the current default.
* ~~Implement sample plates with random numbers of T cells per well.~~ DONE
* Possible BiGpairSEQ advantage over pairSEQ: BiGpairSEQ is resilient to variations in well population sizes on a sample plate; pairSEQ is not.
* Possible BiGpairSEQ advantage over pairSEQ: BiGpairSEQ is resilient to variations in well population sizes on a sample plate; pairSEQ is not due to nature of probability calculations.
* preliminary data suggests that BiGpairSEQ behaves roughly as though the whole plate had whatever the *average* well concentration is, but that's still speculative.
* See if there's a reasonable way to reformat Sample Plate files so that wells are columns instead of rows.
* ~~See if there's a reasonable way to reformat Sample Plate files so that wells are columns instead of rows.~~
* ~~Problem is variable number of cells in a well~~
* ~~Apache Commons CSV library writes entries a row at a time~~
* _Got this working, but at the cost of a profoundly strange bug in graph occupancy filtering. Have reverted the repo until I can figure out what caused that. Given how easily Thingiverse transposes CSV matrices in R, might not even be worth fixing.
* Got this working, but at the cost of a profoundly strange bug in graph occupancy filtering. Have reverted the repo until I can figure out what caused that. Given how easily Thingiverse transposes CSV matrices in R, might not even be worth fixing.
* ~~Enable GraphML output in addition to serialized object binaries, for data portability~~ DONE
* ~~Custom vertex type with attribute for sequence occupancy?~~ DONE
* Advantage: would eliminate the need to use maps to associate vertices with sequences, which would make the code easier to understand.
* ~~Have a branch where this is implemented, but there's a bug that broke matching. Don't currently have time to fix.~~
* ~~Have a branch where this is implemented, but there's a bug that broke matching. Don't currently have time to fix.~~
* ~~Re-implement command line arguments, to enable scripting and statistical simulation studies~~ DONE
* ~~Implement custom Vertex class to simplify code and make it easier to implement different MWM algorithms~~ DONE
* Advantage: would eliminate the need to use maps to associate vertices with sequences, which would make the code easier to understand.
* This also seems to be faster when using the same algorithm than the version with lots of maps, which is a nice bonus!
* ~~Implement simulation of read depth, and of read errors. Pre-filter graph for difference in read count to eliminate spurious sequences.~~ DONE
* Pre-filtering based on comparing (read depth) * (occupancy) to (read count) for each sequence works extremely well
* ~~Add read depth simulation options to CLI~~ DONE
* ~~Update graphml output to reflect current Vertex class attributes~~ DONE
* Individual well data from the SequenceRecords could be included, if there's ever a reason for it
* ~~Implement simulation of sequences being misread as other real sequence~~ DONE
* Update matching metadata output options in CLI
* Add frequency distribution details to metadata output
* need to make an enum for the different distribution types and refactor the Plate class and user interfaces, also add the necessary fields to GraphWithMapData and then call it from Simulator
* Update performance data in this readme
* Add section to ReadMe describing data filtering methods.
* Re-implement CDR1 matching method
* Refactor simulator code to collect all needed data in a single scan of the plate
* Currently it scans once for the vertices and then again for the edge weights. This made simulating read depth awkward, and incompatible with caching of plate files.
* This would be a fairly major rewrite of the simulator code, but could make things faster, and would definitely make them cleaner.
* Implement Duan and Su's maximum weight matching algorithm
* Add controllable algorithm-type parameter?
* This would be fun and valuable, but probably take more time than I have for a hobby project.
* Implement an auction algorithm for maximum weight matching
* Implement an algorithm for approximating a maximum weight matching
* Some of these run in linear or near-linear time
* given that the underlying biological samples have many, many sources of error, this would probably be the most useful option in practice. It seems less mathematically elegant, though, and so less fun for me.
* Implement Vose's alias method for arbitrary statistical distributions of cells
* Should probably refactor to use apache commons rng for this
* Use commons JCS for caching
* Enable post-filtering instead of pre-filtering. Pre-filtering of things like singleton sequences or saturating-occupancy sequences reduces graph size, but could conceivably reduce pairing accuracy by throwing away data. While these sequences have very little signal, it would be interesting to compare unfiltered results to filtered results. This would require a much, much faster MWM algorithm, though, to handle the much larger graphs. Possibly one of the linear-time approximation algorithms.
* Parameterize pre-filtering options
## CITATIONS
@@ -381,4 +653,4 @@ BiGpairSEQ was conceived in collaboration with Dr. Alice MacQueen, who brought t
pairSEQ paper to the author's attention and explained all the biology terms he didn't know.
## AUTHOR
BiGpairSEQ algorithm and simulation by Eugene Fischer, 2021. UI improvements and documentation, 2022.
BiGpairSEQ algorithm and simulation by Eugene Fischer, 2021. Improvements and documentation, 2022.

View File

@@ -13,10 +13,11 @@ public class BiGpairSEQ {
private static boolean cacheCells = false;
private static boolean cachePlate = false;
private static boolean cacheGraph = false;
private static String priorityQueueHeapType = "FIBONACCI";
private static HeapType priorityQueueHeapType = HeapType.FIBONACCI;
private static boolean outputBinary = true;
private static boolean outputGraphML = false;
private static final String version = "version 3.0";
private static boolean calculatePValue = false;
private static final String version = "version 4.2";
public static void main(String[] args) {
if (args.length == 0) {
@@ -157,15 +158,15 @@ public class BiGpairSEQ {
}
public static String getPriorityQueueHeapType() {
return priorityQueueHeapType;
return priorityQueueHeapType.name();
}
public static void setPairingHeap() {
priorityQueueHeapType = "PAIRING";
priorityQueueHeapType = HeapType.PAIRING;
}
public static void setFibonacciHeap() {
priorityQueueHeapType = "FIBONACCI";
priorityQueueHeapType = HeapType.FIBONACCI;
}
public static boolean outputBinary() {return outputBinary;}
@@ -173,5 +174,9 @@ public class BiGpairSEQ {
public static boolean outputGraphML() {return outputGraphML;}
public static void setOutputGraphML(boolean b) {outputGraphML = b;}
public static boolean calculatePValue() {return calculatePValue; }
public static void setCalculatePValue(boolean b) {calculatePValue = b; }
public static String getVersion() { return version; }
}

View File

@@ -12,7 +12,7 @@ import java.util.List;
public class CellFileReader {
private String filename;
private List<Integer[]> distinctCells = new ArrayList<>();
private List<String[]> distinctCells = new ArrayList<>();
private Integer cdr1Freq;
public CellFileReader(String filename) {
@@ -32,11 +32,11 @@ public class CellFileReader {
CSVParser parser = new CSVParser(reader, cellFileFormat);
){
for(CSVRecord record: parser.getRecords()) {
Integer[] cell = new Integer[4];
cell[0] = Integer.valueOf(record.get("Alpha CDR3"));
cell[1] = Integer.valueOf(record.get("Beta CDR3"));
cell[2] = Integer.valueOf(record.get("Alpha CDR1"));
cell[3] = Integer.valueOf(record.get("Beta CDR1"));
String[] cell = new String[4];
cell[0] = record.get("Alpha CDR3");
cell[1] = record.get("Beta CDR3");
cell[2] = record.get("Alpha CDR1");
cell[3] = record.get("Beta CDR1");
distinctCells.add(cell);
}
@@ -47,8 +47,8 @@ public class CellFileReader {
}
//get CDR1 frequency
ArrayList<Integer> cdr1Alphas = new ArrayList<>();
for (Integer[] cell : distinctCells) {
ArrayList<String> cdr1Alphas = new ArrayList<>();
for (String[] cell : distinctCells) {
cdr1Alphas.add(cell[3]);
}
double count = cdr1Alphas.stream().distinct().count();
@@ -58,18 +58,10 @@ public class CellFileReader {
}
public CellSample getCellSample() {
return new CellSample(distinctCells, cdr1Freq);
CellSample sample = new CellSample(distinctCells, cdr1Freq);
sample.setFilename(filename);
return sample;
}
public String getFilename() { return filename;}
//Refactor everything that uses this to have access to a Cell Sample and get the cells there instead.
public List<Integer[]> getListOfDistinctCellsDEPRECATED(){
return distinctCells;
}
public Integer getCellCountDEPRECATED() {
//Refactor everything that uses this to have access to a Cell Sample and get the count there instead.
return distinctCells.size();
}
}

View File

@@ -11,7 +11,7 @@ import java.util.List;
public class CellFileWriter {
private String[] headers = {"Alpha CDR3", "Beta CDR3", "Alpha CDR1", "Beta CDR1"};
List<Integer[]> cells;
List<String[]> cells;
String filename;
Integer cdr1Freq;
@@ -35,7 +35,7 @@ public class CellFileWriter {
printer.printComment("Sample contains 1 unique CDR1 for every " + cdr1Freq + "unique CDR3s.");
printer.printRecords(cells);
} catch(IOException ex){
System.out.println("Could not make new file named "+filename);
System.out.println("Could not make new file named " + filename);
System.err.println(ex);
}
}

View File

@@ -5,39 +5,49 @@ import java.util.stream.IntStream;
public class CellSample {
private List<Integer[]> cells;
private List<String[]> cells;
private Integer cdr1Freq;
private String filename;
public CellSample(Integer numDistinctCells, Integer cdr1Freq){
this.cdr1Freq = cdr1Freq;
List<Integer> numbersCDR3 = new ArrayList<>();
List<Integer> numbersCDR1 = new ArrayList<>();
Integer numDistCDR3s = 2 * numDistinctCells + 1;
//Assign consecutive integers for each CDR3. This ensures they are all unique.
IntStream.range(1, numDistCDR3s + 1).forEach(i -> numbersCDR3.add(i));
//After all CDR3s are assigned, start assigning consecutive integers to CDR1s
//There will usually be fewer integers in the CDR1 list, which will allow repeats below
IntStream.range(numDistCDR3s + 1, numDistCDR3s + 1 + (numDistCDR3s / cdr1Freq) + 1).forEach(i -> numbersCDR1.add(i));
//randomize the order of the numbers in the lists
Collections.shuffle(numbersCDR3);
Collections.shuffle(numbersCDR1);
//Each cell represented by 4 values
//two CDR3s, and two CDR1s. First two values are CDR3s (alpha, beta), second two are CDR1s (alpha, beta)
List<Integer[]> distinctCells = new ArrayList<>();
List<String[]> distinctCells = new ArrayList<>();
for(int i = 0; i < numbersCDR3.size() - 1; i = i + 2){
Integer tmpCDR3a = numbersCDR3.get(i);
Integer tmpCDR3b = numbersCDR3.get(i+1);
Integer tmpCDR1a = numbersCDR1.get(i % numbersCDR1.size());
Integer tmpCDR1b = numbersCDR1.get((i+1) % numbersCDR1.size());
Integer[] tmp = {tmpCDR3a, tmpCDR3b, tmpCDR1a, tmpCDR1b};
//Go through entire CDR3 list once, make pairs of alphas and betas
String tmpCDR3a = numbersCDR3.get(i).toString();
String tmpCDR3b = numbersCDR3.get(i+1).toString();
//Go through the (likely shorter) CDR1 list as many times as necessary, make pairs of alphas and betas
String tmpCDR1a = numbersCDR1.get(i % numbersCDR1.size()).toString();
String tmpCDR1b = numbersCDR1.get((i+1) % numbersCDR1.size()).toString();
//Make the array representing the cell
String[] tmp = {tmpCDR3a, tmpCDR3b, tmpCDR1a, tmpCDR1b};
//Add the cell to the list of distinct cells
distinctCells.add(tmp);
}
this.cells = distinctCells;
this.filename = filename;
}
public CellSample(List<Integer[]> cells, Integer cdr1Freq){
public CellSample(List<String[]> cells, Integer cdr1Freq){
this.cells = cells;
this.cdr1Freq = cdr1Freq;
}
public List<Integer[]> getCells(){
public List<String[]> getCells(){
return cells;
}
@@ -49,4 +59,8 @@ public class CellSample {
return cells.size();
}
public String getFilename() { return filename; }
public void setFilename(String filename) { this.filename = filename; }
}

View File

@@ -35,6 +35,10 @@ import java.util.stream.Stream;
* output : name of the output file
* graphml : output a graphml file
* binary : output a serialized binary object file
* IF SIMULATING READ DEPTH, ALL THESE ARE REQUIRED. Absence indicates not simulating read depth
* readdepth: number of reads per sequence
* readerrorprob: probability of reading a sequence incorrectly
* errcollisionprob: probability of two read errors being identical
*
* Match flags:
* graphFile : name of graph and data file to use as input
@@ -44,6 +48,7 @@ import java.util.stream.Stream;
* minpercent : (optional) the minimum percent overlap to attempt a matching.
* writefile : (optional) the filename to write results to
* output : the values to print to System.out for piping
* pv : (optional) calculate p-values
*
*/
public class CommandLineInterface {
@@ -92,7 +97,7 @@ public class CommandLineInterface {
Integer[] populations;
String outputFilename = line.getOptionValue("o");
Integer numWells = Integer.parseInt(line.getOptionValue("w"));
Double dropoutRate = Double.parseDouble(line.getOptionValue("err"));
Double dropoutRate = Double.parseDouble(line.getOptionValue("d"));
if (line.hasOption("random")) {
//Array holding values of minimum and maximum populations
Integer[] min_max = Stream.of(line.getOptionValues("random"))
@@ -142,7 +147,25 @@ public class CommandLineInterface {
CellSample cells = getCells(cellFilename);
//get plate
Plate plate = getPlate(plateFilename);
GraphWithMapData graph = Simulator.makeGraph(cells, plate, false);
GraphWithMapData graph;
Integer readDepth = 1;
Double readErrorRate = 0.0;
Double errorCollisionRate = 0.0;
Double realSequenceCollisionRate = 0.0;
if (line.hasOption("rd")) {
readDepth = Integer.parseInt(line.getOptionValue("rd"));
}
if (line.hasOption("err")) {
readErrorRate = Double.parseDouble(line.getOptionValue("err"));
}
if (line.hasOption("errcoll")) {
errorCollisionRate = Double.parseDouble(line.getOptionValue("errcoll"));
}
if (line.hasOption("realcoll")) {
realSequenceCollisionRate = Double.parseDouble(line.getOptionValue("realcoll"));
}
graph = Simulator.makeCDR3Graph(cells, plate, readDepth, readErrorRate, errorCollisionRate,
realSequenceCollisionRate, false);
if (!line.hasOption("no-binary")) { //output binary file unless told not to
GraphDataObjectWriter writer = new GraphDataObjectWriter(outputFilename, graph, false);
writer.writeDataToFile();
@@ -180,9 +203,12 @@ public class CommandLineInterface {
else {
maxOccupancyDiff = Integer.MAX_VALUE;
}
if (line.hasOption("pv")) {
BiGpairSEQ.setCalculatePValue(true);
}
GraphWithMapData graph = getGraph(graphFilename);
MatchingResult result = Simulator.matchCDR3s(graph, graphFilename, minThreshold, maxThreshold,
maxOccupancyDiff, minOverlapPct, false);
maxOccupancyDiff, minOverlapPct, false, BiGpairSEQ.calculatePValue());
if(outputFilename != null){
MatchingFileWriter writer = new MatchingFileWriter(outputFilename, result);
writer.writeResultsToFile();
@@ -345,7 +371,8 @@ public class CommandLineInterface {
.hasArgs()
.argName("number [number]...")
.build();
Option dropoutRate = Option.builder("err") //add this to plate options
Option dropoutRate = Option.builder("d") //add this to plate options
.longOpt("dropout-rate")
.hasArg()
.desc("The sequence dropout rate due to amplification error. (0.0 - 1.0)")
.argName("rate")
@@ -384,11 +411,41 @@ public class CommandLineInterface {
.longOpt("no-binary")
.desc("(Optional) Don't output serialized binary file")
.build();
Option readDepth = Option.builder("rd")
.longOpt("read-depth")
.desc("(Optional) The number of times to read each sequence.")
.hasArg()
.argName("depth")
.build();
Option readErrorProb = Option.builder("err")
.longOpt("read-error-prob")
.desc("(Optional) The probability that a sequence will be misread. (0.0 - 1.0)")
.hasArg()
.argName("prob")
.build();
Option errorCollisionProb = Option.builder("errcoll")
.longOpt("error-collision-prob")
.desc("(Optional) The probability that two misreads will produce the same spurious sequence. (0.0 - 1.0)")
.hasArg()
.argName("prob")
.build();
Option realSequenceCollisionProb = Option.builder("realcoll")
.longOpt("real-collision-prob")
.desc("(Optional) The probability that a sequence will be misread " +
"as another real sequence. (Only applies to unique misreads; after this has happened once, " +
"future error collisions could produce the real sequence again) (0.0 - 1.0)")
.hasArg()
.argName("prob")
.build();
graphOptions.addOption(cellFilename);
graphOptions.addOption(plateFilename);
graphOptions.addOption(outputFileOption());
graphOptions.addOption(outputGraphML);
graphOptions.addOption(outputSerializedBinary);
graphOptions.addOption(readDepth);
graphOptions.addOption(readErrorProb);
graphOptions.addOption(errorCollisionProb);
graphOptions.addOption(realSequenceCollisionProb);
return graphOptions;
}
@@ -426,12 +483,17 @@ public class CommandLineInterface {
.argName("filename")
.desc("(Optional) Name of output the output file. If not present, no file will be written.")
.build();
Option pValue = Option.builder("pv") //can't call the method this time, because this one's optional
.longOpt("p-value")
.desc("(Optional) Calculate p-values for sequence pairs.")
.build();
matchCDR3options.addOption(graphFilename)
.addOption(minOccupancyOverlap)
.addOption(maxOccupancyOverlap)
.addOption(minOverlapPercent)
.addOption(maxOccupancyDifference)
.addOption(outputFile);
.addOption(outputFile)
.addOption(pValue);
//options for output to System.out
Option printAlphaCount = Option.builder().longOpt("print-alphas")

View File

@@ -5,7 +5,6 @@ import org.jgrapht.nio.AttributeType;
import org.jgrapht.nio.DefaultAttribute;
import org.jgrapht.nio.graphml.GraphMLExporter;
import org.jgrapht.nio.graphml.GraphMLExporter.AttributeCategory;
import org.w3c.dom.Attr;
import java.io.BufferedWriter;
import java.io.IOException;
@@ -13,6 +12,7 @@ import java.nio.file.Files;
import java.nio.file.Path;
import java.nio.file.StandardOpenOption;
import java.util.HashMap;
import java.util.Iterator;
import java.util.Map;
public class GraphMLFileWriter {
@@ -41,11 +41,11 @@ public class GraphMLFileWriter {
}
private Map<String, Attribute> createGraphAttributes(){
Map<String, Attribute> ga = new HashMap<>();
Map<String, Attribute> attributes = new HashMap<>();
//Sample plate filename
ga.put("sample plate filename", DefaultAttribute.createAttribute(data.getSourceFilename()));
attributes.put("sample plate filename", DefaultAttribute.createAttribute(data.getPlateFilename()));
// Number of wells
ga.put("well count", DefaultAttribute.createAttribute(data.getNumWells().toString()));
attributes.put("well count", DefaultAttribute.createAttribute(data.getNumWells().toString()));
//Well populations
Integer[] wellPopulations = data.getWellPopulations();
StringBuilder populationsStringBuilder = new StringBuilder();
@@ -55,8 +55,37 @@ public class GraphMLFileWriter {
populationsStringBuilder.append(wellPopulations[i].toString());
}
String wellPopulationsString = populationsStringBuilder.toString();
ga.put("well populations", DefaultAttribute.createAttribute(wellPopulationsString));
return ga;
attributes.put("well populations", DefaultAttribute.createAttribute(wellPopulationsString));
attributes.put("read depth", DefaultAttribute.createAttribute(data.getReadDepth().toString()));
attributes.put("read error rate", DefaultAttribute.createAttribute(data.getReadErrorRate().toString()));
attributes.put("error collision rate", DefaultAttribute.createAttribute(data.getErrorCollisionRate().toString()));
attributes.put("real sequence collision rate", DefaultAttribute.createAttribute(data.getRealSequenceCollisionRate()));
return attributes;
}
private Map<String, Attribute> createVertexAttributes(Vertex v){
Map<String, Attribute> attributes = new HashMap<>();
//sequence type
attributes.put("type", DefaultAttribute.createAttribute(v.getType().name()));
//sequence
attributes.put("sequence", DefaultAttribute.createAttribute(v.getSequence()));
//number of wells the sequence appears in
attributes.put("occupancy", DefaultAttribute.createAttribute(v.getOccupancy()));
//total number of times the sequence was read
attributes.put("total read count", DefaultAttribute.createAttribute(v.getReadCount()));
StringBuilder wellsAndReadCountsBuilder = new StringBuilder();
Iterator<Map.Entry<Integer, Integer>> wellOccupancies = v.getWellOccupancies().entrySet().iterator();
while (wellOccupancies.hasNext()) {
Map.Entry<Integer, Integer> entry = wellOccupancies.next();
wellsAndReadCountsBuilder.append(entry.getKey() + ":" + entry.getValue());
if (wellOccupancies.hasNext()) {
wellsAndReadCountsBuilder.append(", ");
}
}
String wellsAndReadCounts = wellsAndReadCountsBuilder.toString();
//the wells the sequence appears in and the read counts in those wells
attributes.put("wells:read counts", DefaultAttribute.createAttribute(wellsAndReadCounts));
return attributes;
}
public void writeGraphToFile() {
@@ -69,13 +98,7 @@ public class GraphMLFileWriter {
//Set graph attributes
exporter.setGraphAttributeProvider( () -> graphAttributes);
//set type, sequence, and occupancy attributes for each vertex
exporter.setVertexAttributeProvider( v -> {
Map<String, Attribute> attributes = new HashMap<>();
attributes.put("type", DefaultAttribute.createAttribute(v.getType().name()));
attributes.put("sequence", DefaultAttribute.createAttribute(v.getSequence()));
attributes.put("occupancy", DefaultAttribute.createAttribute(v.getOccupancy()));
return attributes;
});
exporter.setVertexAttributeProvider(this::createVertexAttributes);
//register the attributes
for(String s : graphAttributes.keySet()) {
exporter.registerAttribute(s, AttributeCategory.GRAPH, AttributeType.STRING);
@@ -83,6 +106,8 @@ public class GraphMLFileWriter {
exporter.registerAttribute("type", AttributeCategory.NODE, AttributeType.STRING);
exporter.registerAttribute("sequence", AttributeCategory.NODE, AttributeType.STRING);
exporter.registerAttribute("occupancy", AttributeCategory.NODE, AttributeType.STRING);
exporter.registerAttribute("total read count", AttributeCategory.NODE, AttributeType.STRING);
exporter.registerAttribute("wells:read counts", AttributeCategory.NODE, AttributeType.STRING);
//export the graph
exporter.exportGraph(graph, writer);
} catch(IOException ex){

View File

@@ -8,11 +8,10 @@ import java.util.Map;
public interface GraphModificationFunctions {
//remove over- and under-weight edges
//remove over- and under-weight edges, return removed edges
static Map<Vertex[], Integer> filterByOverlapThresholds(SimpleWeightedGraph<Vertex, DefaultWeightedEdge> graph,
int low, int high, boolean saveEdges) {
Map<Vertex[], Integer> removedEdges = new HashMap<>();
//List<Integer[]> removedEdges = new ArrayList<>();
for (DefaultWeightedEdge e : graph.edgeSet()) {
if ((graph.getEdgeWeight(e) > high) || (graph.getEdgeWeight(e) < low)) {
if(saveEdges) {
@@ -35,7 +34,7 @@ public interface GraphModificationFunctions {
return removedEdges;
}
//Remove edges for pairs with large occupancy discrepancy
//Remove edges for pairs with large occupancy discrepancy, return removed edges
static Map<Vertex[], Integer> filterByRelativeOccupancy(SimpleWeightedGraph<Vertex, DefaultWeightedEdge> graph,
Integer maxOccupancyDifference, boolean saveEdges) {
Map<Vertex[], Integer> removedEdges = new HashMap<>();
@@ -63,7 +62,7 @@ public interface GraphModificationFunctions {
return removedEdges;
}
//Remove edges for pairs where overlap size is significantly lower than the well occupancy
//Remove edges for pairs where overlap size is significantly lower than the well occupancy, return removed edges
static Map<Vertex[], Integer> filterByOverlapPercent(SimpleWeightedGraph<Vertex, DefaultWeightedEdge> graph,
Integer minOverlapPercent,
boolean saveEdges) {
@@ -94,6 +93,38 @@ public interface GraphModificationFunctions {
return removedEdges;
}
//Remove edges between vertices whose total read counts differ by at least the given threshold.
//Mirrors the other filterBy* methods in this interface: when saveEdges is true the offending
//edges are removed from the graph and returned (keyed by {source, target} vertex pairs, with
//the integer-truncated edge weight as the value); otherwise their weights are zeroed in place
//and the returned map is empty.
static Map<Vertex[], Integer> filterByRelativeReadCount (SimpleWeightedGraph<Vertex, DefaultWeightedEdge> graph, Integer threshold, boolean saveEdges) {
    Map<Vertex[], Integer> removedEdges = new HashMap<>();
    for (DefaultWeightedEdge e : graph.edgeSet()) {
        //look the endpoints up once and reuse them for both the filter and the bookkeeping
        Vertex source = graph.getEdgeSource(e);
        Vertex target = graph.getEdgeTarget(e);
        //primitive boolean: no reason to box the filter result
        boolean passes = RelativeReadCountFilterFunction(threshold, source.getReadCount(), target.getReadCount());
        if (!passes) {
            if (saveEdges) {
                Vertex[] edge = {source, target};
                removedEdges.put(edge, (int) graph.getEdgeWeight(e));
            }
            else {
                //edge stays in the graph but no longer contributes any weight
                graph.setEdgeWeight(e, 0.0);
            }
        }
    }
    //removal is deferred: removing edges while iterating edgeSet() would be a
    //concurrent modification of the underlying graph structure
    if(saveEdges) {
        for (Vertex[] edge : removedEdges.keySet()) {
            graph.removeEdge(edge[0], edge[1]);
        }
    }
    return removedEdges;
}
//Filter predicate for filterByRelativeReadCount: an edge "passes" (is kept) only when the
//absolute difference between its endpoints' total read counts stays strictly below threshold.
static Boolean RelativeReadCountFilterFunction(Integer threshold, Integer alphaReadCount, Integer betaReadCount) {
    int difference = Math.abs(alphaReadCount - betaReadCount);
    return difference < threshold;
}
static void addRemovedEdges(SimpleWeightedGraph<Vertex, DefaultWeightedEdge> graph,
Map<Vertex[], Integer> removedEdges) {
for (Vertex[] edge : removedEdges.keySet()) {
@@ -102,4 +133,6 @@ public interface GraphModificationFunctions {
}
}
}

View File

@@ -9,13 +9,20 @@ import java.util.Map;
//Custom vertex class means a lot of the map data can now be encoded in the graph itself
public class GraphWithMapData implements java.io.Serializable {
private String sourceFilename;
private String cellFilename;
private int cellSampleSize;
private String plateFilename;
private final SimpleWeightedGraph graph;
private Integer numWells;
private Integer[] wellPopulations;
private Integer alphaCount;
private Integer betaCount;
private final Map<Integer, Integer> distCellsMapAlphaKey;
private final int numWells;
private final Integer[] wellPopulations;
private final int alphaCount;
private final int betaCount;
private final double dropoutRate;
private final int readDepth;
private final double readErrorRate;
private final double errorCollisionRate;
private final double realSequenceCollisionRate;
private final Map<String, String> distCellsMapAlphaKey;
// private final Map<Integer, Integer> plateVtoAMap;
// private final Map<Integer, Integer> plateVtoBMap;
// private final Map<Integer, Integer> plateAtoVMap;
@@ -25,9 +32,11 @@ public class GraphWithMapData implements java.io.Serializable {
private final Duration time;
public GraphWithMapData(SimpleWeightedGraph graph, Integer numWells, Integer[] wellConcentrations,
Map<Integer, Integer> distCellsMapAlphaKey, Duration time){
Map<String, String> distCellsMapAlphaKey, Integer alphaCount, Integer betaCount,
Double dropoutRate, Integer readDepth, Double readErrorRate, Double errorCollisionRate,
Double realSequenceCollisionRate, Duration time){
// Map<Integer, Integer> plateVtoAMap, Integer alphaCount, Integer betaCount,
// Map<Integer, Integer> plateVtoAMap,
// Map<Integer,Integer> plateVtoBMap, Map<Integer, Integer> plateAtoVMap,
// Map<Integer, Integer> plateBtoVMap, Map<Integer, Integer> alphaWellCounts,
// Map<Integer, Integer> betaWellCounts,) {
@@ -43,6 +52,11 @@ public class GraphWithMapData implements java.io.Serializable {
// this.plateBtoVMap = plateBtoVMap;
// this.alphaWellCounts = alphaWellCounts;
// this.betaWellCounts = betaWellCounts;
this.dropoutRate = dropoutRate;
this.readDepth = readDepth;
this.readErrorRate = readErrorRate;
this.errorCollisionRate = errorCollisionRate;
this.realSequenceCollisionRate = realSequenceCollisionRate;
this.time = time;
}
@@ -58,15 +72,15 @@ public class GraphWithMapData implements java.io.Serializable {
return wellPopulations;
}
// public Integer getAlphaCount() {
// return alphaCount;
// }
//
// public Integer getBetaCount() {
// return betaCount;
// }
public Integer getAlphaCount() {
return alphaCount;
}
public Map<Integer, Integer> getDistCellsMapAlphaKey() {
public Integer getBetaCount() {
return betaCount;
}
public Map<String, String> getDistCellsMapAlphaKey() {
return distCellsMapAlphaKey;
}
@@ -94,15 +108,37 @@ public class GraphWithMapData implements java.io.Serializable {
// return betaWellCounts;
// }
public Integer getReadDepth() { return readDepth; }
public Duration getTime() {
return time;
}
public void setSourceFilename(String filename) {
this.sourceFilename = filename;
public void setCellFilename(String filename) { this.cellFilename = filename; }
public String getCellFilename() { return this.cellFilename; }
public Integer getCellSampleSize() { return this.cellSampleSize; }
public void setCellSampleSize(int size) { this.cellSampleSize = size;}
public void setPlateFilename(String filename) {
this.plateFilename = filename;
}
public String getSourceFilename() {
return sourceFilename;
public String getPlateFilename() {
return plateFilename;
}
public Double getReadErrorRate() {
return readErrorRate;
}
public Double getErrorCollisionRate() {
return errorCollisionRate;
}
public Double getRealSequenceCollisionRate() { return realSequenceCollisionRate; }
public Double getDropoutRate() { return dropoutRate; }
}

View File

@@ -0,0 +1,4 @@
//Heap implementations available as options — presumably for the maximum weight matching
//algorithm ("Maximum weight matching algorithm options" menu); confirm against the matcher.
public enum HeapType {
    //Fibonacci heap
    FIBONACCI,
    //pairing heap
    PAIRING
}

View File

@@ -114,8 +114,8 @@ public class InteractiveInterface {
System.out.println("1) Poisson");
System.out.println("2) Gaussian");
System.out.println("3) Exponential");
System.out.println("(Note: approximate distribution in original paper is exponential, lambda = 0.6)");
System.out.println("(lambda value approximated from slope of log-log graph in figure 4c)");
// System.out.println("(Note: approximate distribution in original paper is exponential, lambda = 0.6)");
// System.out.println("(lambda value approximated from slope of log-log graph in figure 4c)");
System.out.println("(Note: wider distributions are more memory intensive to match)");
System.out.print("Enter selection value: ");
input = sc.nextInt();
@@ -250,6 +250,12 @@ public class InteractiveInterface {
String filename = null;
String cellFile = null;
String plateFile = null;
Boolean simulateReadDepth = false;
//number of times to read each sequence in a well
int readDepth = 1;
double readErrorRate = 0.0;
double errorCollisionRate = 0.0;
double realSequenceCollisionRate = 0.0;
try {
String str = "\nGenerating bipartite weighted graph encoding occupancy overlap data ";
str = str.concat("\nrequires a cell sample file and a sample plate file.");
@@ -258,6 +264,38 @@ public class InteractiveInterface {
cellFile = sc.next();
System.out.print("\nPlease enter name of an existing sample plate file: ");
plateFile = sc.next();
System.out.println("\nEnable simulation of sequence read depth and sequence read errors? (y/n)");
String ans = sc.next();
Pattern pattern = Pattern.compile("(?:yes|y)", Pattern.CASE_INSENSITIVE);
Matcher matcher = pattern.matcher(ans);
if(matcher.matches()){
simulateReadDepth = true;
}
if (simulateReadDepth) {
System.out.print("\nPlease enter the read depth (the integer number of times a sequence is read): ");
readDepth = sc.nextInt();
if(readDepth < 1) {
throw new InputMismatchException("The read depth must be an integer >= 1");
}
System.out.println("\nPlease enter the read error probability (0.0 to 1.0)");
System.out.print("(The probability that a sequence will be misread): ");
readErrorRate = sc.nextDouble();
if(readErrorRate < 0.0 || readErrorRate > 1.0) {
throw new InputMismatchException("The read error probability must be in the range [0.0, 1.0]");
}
System.out.println("\nPlease enter the error collision probability (0.0 to 1.0)");
System.out.print("(The probability of a sequence being misread in a way it has been misread before): ");
errorCollisionRate = sc.nextDouble();
if(errorCollisionRate < 0.0 || errorCollisionRate > 1.0) {
throw new InputMismatchException("The error collision probability must be an in the range [0.0, 1.0]");
}
System.out.println("\nPlease enter the real sequence collision probability (0.0 to 1.0)");
System.out.print("(The probability that a (non-collision) misread produces a different, real sequence): ");
realSequenceCollisionRate = sc.nextDouble();
if(realSequenceCollisionRate < 0.0 || realSequenceCollisionRate > 1.0) {
throw new InputMismatchException("The real sequence collision probability must be an in the range [0.0, 1.0]");
}
}
System.out.println("\nThe graph and occupancy data will be written to a file.");
System.out.print("Please enter a name for the output file: ");
filename = sc.next();
@@ -304,7 +342,8 @@ public class InteractiveInterface {
System.out.println("Returning to main menu.");
}
else{
GraphWithMapData data = Simulator.makeGraph(cellSample, plate, true);
GraphWithMapData data = Simulator.makeCDR3Graph(cellSample, plate, readDepth, readErrorRate,
errorCollisionRate, realSequenceCollisionRate, true);
assert filename != null;
if(BiGpairSEQ.outputBinary()) {
GraphDataObjectWriter dataWriter = new GraphDataObjectWriter(filename, data);
@@ -383,7 +422,7 @@ public class InteractiveInterface {
}
//simulate matching
MatchingResult results = Simulator.matchCDR3s(data, graphFilename, lowThreshold, highThreshold, maxOccupancyDiff,
minOverlapPercent, true);
minOverlapPercent, true, BiGpairSEQ.calculatePValue());
//write results to file
assert filename != null;
MatchingFileWriter writer = new MatchingFileWriter(filename, results);
@@ -505,7 +544,8 @@ public class InteractiveInterface {
System.out.println("3) Turn " + getOnOff(!BiGpairSEQ.cacheGraph()) + " graph/data file caching");
System.out.println("4) Turn " + getOnOff(!BiGpairSEQ.outputBinary()) + " serialized binary graph output");
System.out.println("5) Turn " + getOnOff(!BiGpairSEQ.outputGraphML()) + " GraphML graph output (for data portability to other programs)");
System.out.println("6) Maximum weight matching algorithm options");
System.out.println("6) Turn " + getOnOff(!BiGpairSEQ.calculatePValue()) + " calculation of p-values");
System.out.println("7) Maximum weight matching algorithm options");
System.out.println("0) Return to main menu");
try {
input = sc.nextInt();
@@ -515,7 +555,8 @@ public class InteractiveInterface {
case 3 -> BiGpairSEQ.setCacheGraph(!BiGpairSEQ.cacheGraph());
case 4 -> BiGpairSEQ.setOutputBinary(!BiGpairSEQ.outputBinary());
case 5 -> BiGpairSEQ.setOutputGraphML(!BiGpairSEQ.outputGraphML());
case 6 -> algorithmOptions();
case 6 -> BiGpairSEQ.setCalculatePValue(!BiGpairSEQ.calculatePValue());
case 7 -> algorithmOptions();
case 0 -> backToMain = true;
default -> System.out.println("Invalid input");
}

View File

@@ -9,27 +9,34 @@ public class MatchingResult {
private final List<String> comments;
private final List<String> headers;
private final List<List<String>> allResults;
private final Map<Integer, Integer> matchMap;
private final Duration time;
private final Map<String, String> matchMap;
public MatchingResult(Map<String, String> metadata, List<String> headers,
List<List<String>> allResults, Map<Integer, Integer>matchMap, Duration time){
List<List<String>> allResults, Map<String, String>matchMap){
/*
* POSSIBLE KEYS FOR METADATA MAP ARE:
* sample plate filename *
* graph filename *
* matching weight *
* well populations *
* total alphas found *
* total betas found *
* high overlap threshold *
* low overlap threshold *
* maximum occupancy difference *
* minimum overlap percent *
* sequence read depth *
* sequence read error rate *
* read error collision rate *
* total alphas read from plate *
* total betas read from plate *
* alphas in graph (after pre-filtering) *
* betas in graph (after pre-filtering) *
* high overlap threshold for pairing *
* low overlap threshold for pairing *
* maximum occupancy difference for pairing *
* minimum overlap percent for pairing *
* pairing attempt rate *
* correct pairing count *
* incorrect pairing count *
* pairing error rate *
* simulation time (seconds)
* time to generate graph (seconds) *
* time to pair sequences (seconds) *
* total simulation time (seconds) *
*/
this.metadata = metadata;
this.comments = new ArrayList<>();
@@ -39,8 +46,6 @@ public class MatchingResult {
this.headers = headers;
this.allResults = allResults;
this.matchMap = matchMap;
this.time = time;
}
public Map<String, String> getMetadata() {return metadata;}
@@ -57,13 +62,13 @@ public class MatchingResult {
return headers;
}
public Map<Integer, Integer> getMatchMap() {
public Map<String, String> getMatchMap() {
return matchMap;
}
public Duration getTime() {
return time;
}
// public Duration getTime() {
// return time;
// }
public String getPlateFilename() {
return metadata.get("sample plate filename");
@@ -84,20 +89,20 @@ public class MatchingResult {
}
public Integer getAlphaCount() {
return Integer.parseInt(metadata.get("total alpha count"));
return Integer.parseInt(metadata.get("total alphas read from plate"));
}
public Integer getBetaCount() {
return Integer.parseInt(metadata.get("total beta count"));
return Integer.parseInt(metadata.get("total betas read from plate"));
}
public Integer getHighOverlapThreshold() { return Integer.parseInt(metadata.get("high overlap threshold"));}
public Integer getHighOverlapThreshold() { return Integer.parseInt(metadata.get("high overlap threshold for pairing"));}
public Integer getLowOverlapThreshold() { return Integer.parseInt(metadata.get("low overlap threshold"));}
public Integer getLowOverlapThreshold() { return Integer.parseInt(metadata.get("low overlap threshold for pairing"));}
public Integer getMaxOccupancyDifference() { return Integer.parseInt(metadata.get("maximum occupancy difference"));}
public Integer getMaxOccupancyDifference() { return Integer.parseInt(metadata.get("maximum occupancy difference for pairing"));}
public Integer getMinOverlapPercent() { return Integer.parseInt(metadata.get("minimum overlap percent"));}
public Integer getMinOverlapPercent() { return Integer.parseInt(metadata.get("minimum overlap percent for pairing"));}
public Double getPairingAttemptRate() { return Double.parseDouble(metadata.get("pairing attempt rate"));}
@@ -107,6 +112,6 @@ public class MatchingResult {
public Double getPairingErrorRate() { return Double.parseDouble(metadata.get("pairing error rate"));}
public String getSimulationTime() { return metadata.get("simulation time (seconds)"); }
public String getSimulationTime() { return metadata.get("total simulation time (seconds)"); }
}

View File

@@ -2,16 +2,24 @@
/*
TODO: Implement exponential distribution using inversion method - DONE
TODO: Implement collisions with real sequences by having the counting function keep a map of all sequences it's read,
with values of all misreads. Can then have a spurious/real collision rate, which will have count randomly select a sequence
it's already read at least once, and put that into the list of spurious sequences for the given real sequence. Will let me get rid
of the distinctMisreadCount map, and use this new map instead. Doing it this way, once a sequence has been misread as another
sequence once, it is more likely to be misread that way again, as future read error collisions can also be real sequence collisions
Prob A: a read error occurs. Prob B: it's a new error (otherwise it's a repeated error). Prob C: if new error, prob that it's
a real sequence collision (otherwise it's a new spurious sequence) - DONE
TODO: Implement discrete frequency distributions using Vose's Alias Method
*/
import java.util.*;
public class Plate {
private CellSample cells;
private String sourceFile;
private String filename;
private List<List<Integer[]>> wells;
private List<List<String[]>> wells;
private final Random rand = BiGpairSEQ.getRand();
private int size;
private double error;
@@ -48,13 +56,13 @@ public class Plate {
}
//constructor for returning a Plate from a PlateFileReader
public Plate(String filename, List<List<Integer[]>> wells) {
public Plate(String filename, List<List<String[]>> wells) {
this.filename = filename;
this.wells = wells;
this.size = wells.size();
List<Integer> concentrations = new ArrayList<>();
for (List<Integer[]> w: wells) {
for (List<String[]> w: wells) {
if(!concentrations.contains(w.size())){
concentrations.add(w.size());
}
@@ -65,7 +73,7 @@ public class Plate {
}
}
private void fillWellsExponential(List<Integer[]> cells, double lambda){
private void fillWellsExponential(List<String[]> cells, double lambda){
this.lambda = lambda;
exponential = true;
int numSections = populations.length;
@@ -74,17 +82,17 @@ public class Plate {
int n;
while (section < numSections){
for (int i = 0; i < (size / numSections); i++) {
List<Integer[]> well = new ArrayList<>();
List<String[]> well = new ArrayList<>();
for (int j = 0; j < populations[section]; j++) {
do {
//inverse transform sampling: for random number u in [0,1), x = log(1-u) / (-lambda)
m = (Math.log10((1 - rand.nextDouble()))/(-lambda)) * Math.sqrt(cells.size());
} while (m >= cells.size() || m < 0);
n = (int) Math.floor(m);
Integer[] cellToAdd = cells.get(n).clone();
String[] cellToAdd = cells.get(n).clone();
for(int k = 0; k < cellToAdd.length; k++){
if(Math.abs(rand.nextDouble()) < error){//error applied to each seqeunce
cellToAdd[k] = -1;
if(Math.abs(rand.nextDouble()) <= error){//error applied to each sequence
cellToAdd[k] = "-1";
}
}
well.add(cellToAdd);
@@ -95,7 +103,7 @@ public class Plate {
}
}
private void fillWells( List<Integer[]> cells, double stdDev) {
private void fillWells( List<String[]> cells, double stdDev) {
this.stdDev = stdDev;
int numSections = populations.length;
int section = 0;
@@ -103,16 +111,16 @@ public class Plate {
int n;
while (section < numSections){
for (int i = 0; i < (size / numSections); i++) {
List<Integer[]> well = new ArrayList<>();
List<String[]> well = new ArrayList<>();
for (int j = 0; j < populations[section]; j++) {
do {
m = (rand.nextGaussian() * stdDev) + (cells.size() / 2);
} while (m >= cells.size() || m < 0);
n = (int) Math.floor(m);
Integer[] cellToAdd = cells.get(n).clone();
String[] cellToAdd = cells.get(n).clone();
for(int k = 0; k < cellToAdd.length; k++){
if(Math.abs(rand.nextDouble()) < error){//error applied to each sequence
cellToAdd[k] = -1;
cellToAdd[k] = "-1";
}
}
well.add(cellToAdd);
@@ -143,37 +151,107 @@ public class Plate {
return error;
}
public List<List<Integer[]>> getWells() {
public List<List<String[]>> getWells() {
return wells;
}
//returns a map of the counts of the sequence at cell index sIndex, in all wells
public Map<Integer, Integer> assayWellsSequenceS(int... sIndices){
return this.assayWellsSequenceS(0, size, sIndices);
}
//returns a map of the counts of the sequence at cell index sIndex, in a specific well
public Map<Integer, Integer> assayWellsSequenceS(int n, int... sIndices) { return this.assayWellsSequenceS(n, n+1, sIndices);}
//returns a map of the counts of the sequence at cell index sIndex, in a range of wells
public Map<Integer, Integer> assayWellsSequenceS(int start, int end, int... sIndices) {
Map<Integer,Integer> assay = new HashMap<>();
for(int pIndex: sIndices){
for(int i = start; i < end; i++){
countSequences(assay, wells.get(i), pIndex);
}
}
return assay;
}
//For the sequences at cell indices sIndices, counts number of unique sequences in the given well into the given map
private void countSequences(Map<Integer, Integer> wellMap, List<Integer[]> well, int... sIndices) {
for(Integer[] cell : well) {
for(int sIndex: sIndices){
if(cell[sIndex] != -1){
wellMap.merge(cell[sIndex], 1, (oldValue, newValue) -> oldValue + newValue);
//For the sequences at cell indices sIndices, counts the reads of every distinct sequence,
//per well, across all wells of the plate. Also simulates sequence read errors:
//  readDepth                 - number of times each sequence in a well is read
//  readErrorRate             - probability that any single read is a misread
//  errorCollisionRate        - probability a misread repeats an earlier misread of the same sequence
//  realSequenceCollisionRate - probability a NEW misread collides with a real sequence already read
//Returns a map of SequenceRecords containing plate data for every sequence read (keys are the
//sequences, in first-read order; spurious sequences are the real sequence plus appended "*"s).
public Map<String, SequenceRecord> countSequences(Integer readDepth, Double readErrorRate,
                                                  Double errorCollisionRate, Double realSequenceCollisionRate, int... sIndices) {
    SequenceType[] sequenceTypes = EnumSet.allOf(SequenceType.class).toArray(new SequenceType[0]);
    //Keys are real sequences seen so far; values are the distinct ways each has been misread.
    Map<String, List<String>> sequencesAndMisreads = new HashMap<>();
    //Keys are all sequences read (real and spurious); values are their accumulated records.
    Map<String, SequenceRecord> sequenceMap = new LinkedHashMap<>();
    //all distinct, real sequences present anywhere on the plate
    String[] realSequences = assayWells(sIndices).toArray(new String[0]);
    for (int well = 0; well < size; well++) {
        for (String[] cell: wells.get(well)) {
            for (int sIndex: sIndices) {
                //the sequence being read
                String currentSequence = cell[sIndex];
                //skip dropout sequences, which have value -1
                if ("-1".equals(currentSequence)) {
                    continue;
                }
                //keep rereading the sequence until the read depth is reached
                for (int j = 0; j < readDepth; j++) {
                    //The sequence is misread
                    if (rand.nextDouble() < readErrorRate) {
                        //make sure a misread list exists for this sequence
                        List<String> misreads =
                                sequencesAndMisreads.computeIfAbsent(currentSequence, k -> new ArrayList<>());
                        //The specific misread hasn't happened before
                        if (rand.nextDouble() >= errorCollisionRate || misreads.isEmpty()) {
                            //BUGFIX: a collision with an already-read sequence is only possible when
                            //sequenceMap is non-empty. The original tested !sequenceMap.isEmpty() on
                            //the SPURIOUS branch, which routed collisions into
                            //sequenceMap.get(...).addRead(...) on an empty map (NPE) and could spin
                            //forever in its do/while.
                            String wrongSequence = null;
                            if (rand.nextDouble() < realSequenceCollisionRate && !sequenceMap.isEmpty()) {
                                wrongSequence = pickRealCollision(currentSequence, realSequences, sequenceMap, misreads);
                            }
                            //The misread collides with a real sequence already read from the plate
                            if (wrongSequence != null) {
                                //update the SequenceRecord for wrongSequence
                                sequenceMap.get(wrongSequence).addRead(well);
                                //add wrongSequence to the misreads for currentSequence
                                misreads.add(wrongSequence);
                            }
                            //The misread produces a brand-new spurious sequence
                            else {
                                //spurious name: the real sequence with one "*" per distinct misread so far
                                StringBuilder spurious = new StringBuilder(currentSequence);
                                for (int k = 0; k <= misreads.size(); k++) {
                                    spurious.append("*");
                                }
                                //New sequence record for the spurious sequence
                                SequenceRecord tmp = new SequenceRecord(spurious.toString(), sequenceTypes[sIndex]);
                                tmp.addRead(well);
                                sequenceMap.put(spurious.toString(), tmp);
                                //add spurious sequence to list of misreads for the real sequence
                                misreads.add(spurious.toString());
                            }
                        }
                        //else: a REPEATED misread. NOTE(review): the read is currently dropped rather
                        //than credited to the earlier misread's record — confirm this is intended.
                    }
                    //The sequence is read correctly
                    else {
                        SequenceRecord record = sequenceMap.get(currentSequence);
                        //the sequence hasn't been read correctly before
                        if (record == null) {
                            record = new SequenceRecord(currentSequence, sequenceTypes[sIndex]);
                            sequenceMap.put(currentSequence, record);
                            //BUGFIX: putIfAbsent — the original unconditionally put a fresh list here,
                            //wiping any misreads recorded before the first correct read
                            sequencesAndMisreads.putIfAbsent(currentSequence, new ArrayList<>());
                        }
                        //add this read to the sequence record
                        record.addRead(well);
                    }
                }
            }
        }
    }
    return sequenceMap;
}
//Pick a real sequence for a read-error collision: it must already have a record (it has been
//read before), must differ from the sequence actually being read, and must not already be a
//recorded misread of it. Returns null when no eligible target is found after a bounded number
//of random attempts; the caller then falls back to generating a spurious sequence.
private String pickRealCollision(String currentSequence, String[] realSequences,
                                 Map<String, SequenceRecord> sequenceMap, List<String> misreads) {
    for (int attempt = 0; attempt < realSequences.length; attempt++) {
        String candidate = realSequences[rand.nextInt(realSequences.length)];
        if (!candidate.equals(currentSequence)
                && sequenceMap.containsKey(candidate)
                && !misreads.contains(candidate)) {
            return candidate;
        }
    }
    return null;
}
//Collect every distinct real sequence present on the plate at the given cell indices.
//BUGFIX: dropout entries ("-1") are excluded; the original added them to the set, so a
//read-error collision could select "-1" as its target — a "sequence" that never gets a
//SequenceRecord — leading to a null lookup in countSequences.
private HashSet<String> assayWells(int[] indices) {
    HashSet<String> allSequences = new HashSet<>();
    for (List<String[]> well: wells) {
        for (String[] cell: well) {
            for(int index: indices) {
                String sequence = cell[index];
                if (!"-1".equals(sequence)) {
                    allSequences.add(sequence);
                }
            }
        }
    }
    return allSequences;
}
public String getSourceFileName() {

View File

@@ -13,7 +13,7 @@ import java.util.regex.Pattern;
public class PlateFileReader {
private List<List<Integer[]>> wells = new ArrayList<>();
private List<List<String[]>> wells = new ArrayList<>();
private String filename;
public PlateFileReader(String filename){
@@ -32,17 +32,17 @@ public class PlateFileReader {
CSVParser parser = new CSVParser(reader, plateFileFormat);
){
for(CSVRecord record: parser.getRecords()) {
List<Integer[]> well = new ArrayList<>();
List<String[]> well = new ArrayList<>();
for(String s: record) {
if(!"".equals(s)) {
String[] intString = s.replaceAll("\\[", "")
String[] sequences = s.replaceAll("\\[", "")
.replaceAll("]", "")
.replaceAll(" ", "")
.split(",");
//System.out.println(intString);
Integer[] arr = new Integer[intString.length];
for (int i = 0; i < intString.length; i++) {
arr[i] = Integer.valueOf(intString[i]);
//System.out.println(sequences);
String[] arr = new String[sequences.length];
for (int i = 0; i < sequences.length; i++) {
arr[i] = sequences[i];
}
well.add(arr);
}

View File

@@ -10,7 +10,7 @@ import java.util.*;
public class PlateFileWriter {
private int size;
private List<List<Integer[]>> wells;
private List<List<String[]>> wells;
private double stdDev;
private double lambda;
private Double error;
@@ -40,13 +40,13 @@ public class PlateFileWriter {
}
public void writePlateFile(){
Comparator<List<Integer[]>> listLengthDescending = Comparator.comparingInt(List::size);
Comparator<List<String[]>> listLengthDescending = Comparator.comparingInt(List::size);
wells.sort(listLengthDescending.reversed());
int maxLength = wells.get(0).size();
List<List<String>> wellsAsStrings = new ArrayList<>();
for (List<Integer[]> w: wells){
for (List<String[]> w: wells){
List<String> tmp = new ArrayList<>();
for(Integer[] c: w) {
for(String[] c: w) {
tmp.add(Arrays.toString(c));
}
wellsAsStrings.add(tmp);

View File

@@ -0,0 +1,70 @@
/*
Class to represent individual sequences, holding their well occupancy and read count information.
Will make a map of these keyed to the sequences themselves.
Ideally, I'll be able to construct both the Vertices and the weights matrix from this map.
*/
import java.io.Serializable;
import java.util.*;
//Represents a single sequence observed on the sample plate, tracking which wells it was
//read in and how many times it was read in each well. Instances are keyed by their
//sequence string in maps, and both the Vertex set and the weight matrix can be built
//from such a map.
public class SequenceRecord implements Serializable {
    private final String sequence;
    private final SequenceType type;
    //keys are well numbers, values are read counts in that well (kept in insertion order)
    private final Map<Integer, Integer> wells = new LinkedHashMap<>();

    public SequenceRecord (String sequence, SequenceType type) {
        this.sequence = sequence;
        this.type = type;
    }

    //the sequence itself (usually also the map key this record is stored under)
    public String getSequence() {
        return sequence;
    }

    public SequenceType getSequenceType(){
        return type;
    }

    //record one read of this sequence in the given well (increments that well's count)
    public void addRead(Integer wellNumber) {
        wells.merge(wellNumber, 1, Integer::sum);
    }

    //set a well's read count outright (overwrites any existing count for that well)
    public void addWellData(Integer wellNumber, Integer readCount) {
        wells.put(wellNumber, readCount);
    }

    //Remove a well from the occupancy map. Useful when one sequence is misread as another
    //sequence that isn't actually present in the well — that can reveal itself as an
    //anomalously low read count there.
    public void deleteWell(Integer wellNumber) { wells.remove(wellNumber); }

    //the set of wells this sequence was read in
    public Set<Integer> getWells() {
        return wells.keySet();
    }

    //well number -> read count in that well
    public Map<Integer, Integer> getWellOccupancies() { return wells; }

    public boolean isInWell(Integer wellNumber) {
        return wells.containsKey(wellNumber);
    }

    //number of distinct wells occupied
    public Integer getOccupancy() {
        return wells.size();
    }

    //total read count across the whole plate
    public Integer getReadCount(){
        int total = 0;
        for (Integer count : wells.values()) {
            total += count;
        }
        return total;
    }

    //read count in a specific well (null if the sequence was never read there)
    public Integer getReadCount(Integer wellNumber) {
        return wells.get(wellNumber);
    }
}

View File

@@ -12,120 +12,129 @@ import java.text.NumberFormat;
import java.time.Instant;
import java.time.Duration;
import java.util.*;
import java.util.stream.IntStream;
import static java.lang.Float.*;
//NOTE: "sequence" in method and variable names refers to a peptide sequence from a simulated T cell
public class Simulator implements GraphModificationFunctions {
//Make the graph needed for matching sequences.
//sourceVertexIndices and targetVertexIndices are indices within the cell to use as for the two sets of vertices
//in the bipartite graph. "Source" and "target" are JGraphT terms for the two vertices an edge touches,
//even if not directed.
public static GraphWithMapData makeGraph(CellSample cellSample, Plate samplePlate, boolean verbose) {
public static GraphWithMapData makeCDR3Graph(CellSample cellSample, Plate samplePlate, int readDepth,
double readErrorRate, double errorCollisionRate,
double realSequenceCollisionRate, boolean verbose) {
//start timing
Instant start = Instant.now();
List<Integer[]> distinctCells = cellSample.getCells();
int[] alphaIndices = {SequenceType.CDR3_ALPHA.ordinal()};
int[] betaIndices = {SequenceType.CDR3_BETA.ordinal()};
List<String[]> distinctCells = cellSample.getCells();
int numWells = samplePlate.getSize();
//Make a hashmap keyed to alphas, values are associated betas.
if(verbose){System.out.println("Making cell maps");}
//HashMap keyed to Alphas, values Betas
Map<Integer, Integer> distCellsMapAlphaKey = makeSequenceToSequenceMap(distinctCells, 0, 1);
Map<String, String> distCellsMapAlphaKey = makeSequenceToSequenceMap(distinctCells,
SequenceType.CDR3_ALPHA.ordinal(), SequenceType.CDR3_BETA.ordinal());
if(verbose){System.out.println("Cell maps made");}
if(verbose){System.out.println("Making well maps");}
//Make linkedHashMap keyed to sequences, values are SequenceRecords reflecting plate statistics
if(verbose){System.out.println("Making sample plate sequence maps");}
Map<String, SequenceRecord> alphaSequences = samplePlate.countSequences(readDepth, readErrorRate,
errorCollisionRate, realSequenceCollisionRate, alphaIndices);
int alphaCount = alphaSequences.size();
if(verbose){System.out.println("Alphas sequences read: " + alphaCount);}
Map<String, SequenceRecord> betaSequences = samplePlate.countSequences(readDepth, readErrorRate,
errorCollisionRate, realSequenceCollisionRate, betaIndices);
int betaCount = betaSequences.size();
if(verbose){System.out.println("Betas sequences read: " + betaCount);}
if(verbose){System.out.println("Sample plate sequence maps made");}
Map<Integer, Integer> allAlphas = samplePlate.assayWellsSequenceS(alphaIndices);
Map<Integer, Integer> allBetas = samplePlate.assayWellsSequenceS(betaIndices);
int alphaCount = allAlphas.size();
if(verbose){System.out.println("All alphas count: " + alphaCount);}
int betaCount = allBetas.size();
if(verbose){System.out.println("All betas count: " + betaCount);}
if(verbose){System.out.println("Well maps made");}
// if(verbose){System.out.println("Removing singleton sequences and sequences present in all wells.");}
// filterByOccupancyThresholds(allAlphas, 2, numWells - 1);
// filterByOccupancyThresholds(allBetas, 2, numWells - 1);
// if(verbose){System.out.println("Sequences removed");}
int pairableAlphaCount = allAlphas.size();
if(verbose){System.out.println("Remaining alphas count: " + pairableAlphaCount);}
int pairableBetaCount = allBetas.size();
if(verbose){System.out.println("Remaining betas count: " + pairableBetaCount);}
//pre-filter saturating sequences and sequences likely to be misreads
if(verbose){System.out.println("Removing sequences present in all wells.");}
filterByOccupancyThresholds(alphaSequences, 1, numWells - 1);
filterByOccupancyThresholds(betaSequences, 1, numWells - 1);
if(verbose){System.out.println("Sequences removed");}
if(verbose){System.out.println("Remaining alpha sequence count: " + alphaSequences.size());}
if(verbose){System.out.println("Remaining beta sequence count: " + betaSequences.size());}
if (readDepth > 1) {
if(verbose){System.out.println("Removing sequences with disparate occupancies and read counts");}
filterByOccupancyAndReadCount(alphaSequences, readDepth);
filterByOccupancyAndReadCount(betaSequences, readDepth);
if(verbose){System.out.println("Sequences removed");}
if(verbose){System.out.println("Remaining alpha sequence count: " + alphaSequences.size());}
if(verbose){System.out.println("Remaining beta sequence count: " + betaSequences.size());}
}
if (realSequenceCollisionRate > 0.0) {
if(verbose){System.out.println("Removing wells with anomalous read counts from sequence records");}
int alphaWellsRemoved = filterWellsByReadCount(alphaSequences);
int betaWellsRemoved = filterWellsByReadCount(betaSequences);
if(verbose){System.out.println("Wells with anomalous read counts removed from sequence records");}
if(verbose){System.out.println("Total alpha sequence wells removed: " + alphaWellsRemoved);}
if(verbose){System.out.println("Total beta sequence wells removed: " + betaWellsRemoved);}
}
//construct the graph. For simplicity, going to make
if(verbose){System.out.println("Making vertex maps");}
//For the SimpleWeightedBipartiteGraphMatrixGenerator, all vertices must have
//distinct numbers associated with them. Since I'm using a 2D array, that means
//distinct indices between the rows and columns. vertexStartValue lets me track where I switch
//from numbering rows to columns, so I can assign unique numbers to every vertex, and then
//subtract the vertexStartValue from betas to use their vertex labels as array indices
Integer vertexStartValue = 0;
int vertexStartValue = 0;
//keys are sequential integer vertices, values are alphas
Map<Integer, Integer> plateVtoAMap = makeVertexToSequenceMap(allAlphas, vertexStartValue);
Map<String, Integer> plateAtoVMap = makeSequenceToVertexMap(alphaSequences, vertexStartValue);
//new start value for vertex to beta map should be one more than final vertex value in alpha map
vertexStartValue += plateVtoAMap.size();
//keys are sequential integers vertices, values are betas
Map<Integer, Integer> plateVtoBMap = makeVertexToSequenceMap(allBetas, vertexStartValue);
//keys are alphas, values are sequential integer vertices from previous map
Map<Integer, Integer> plateAtoVMap = invertVertexMap(plateVtoAMap);
//keys are betas, values are sequential integer vertices from previous map
Map<Integer, Integer> plateBtoVMap = invertVertexMap(plateVtoBMap);
vertexStartValue += plateAtoVMap.size();
//keys are betas, values are sequential integers
Map<String, Integer> plateBtoVMap = makeSequenceToVertexMap(betaSequences, vertexStartValue);
if(verbose){System.out.println("Vertex maps made");}
//make adjacency matrix for bipartite graph generator
//(technically this is only 1/4 of an adjacency matrix, but that's all you need
//for a bipartite graph, and all the SimpleWeightedBipartiteGraphMatrixGenerator class expects.)
if(verbose){System.out.println("Creating adjacency matrix");}
//Count how many wells each alpha sequence appears in
Map<Integer, Integer> alphaWellCounts = new HashMap<>();
//count how many wells each beta sequence appears in
Map<Integer, Integer> betaWellCounts = new HashMap<>();
//the adjacency matrix to be used by the graph generator
double[][] weights = new double[plateVtoAMap.size()][plateVtoBMap.size()];
countSequencesAndFillMatrix(samplePlate, allAlphas, allBetas, plateAtoVMap,
plateBtoVMap, alphaIndices, betaIndices, alphaWellCounts, betaWellCounts, weights);
if(verbose){System.out.println("Matrix created");}
//create bipartite graph
if(verbose){System.out.println("Creating graph");}
if(verbose){System.out.println("Making adjacency matrix");}
double[][] weights = new double[plateAtoVMap.size()][plateBtoVMap.size()];
fillAdjacencyMatrix(weights, vertexStartValue, alphaSequences, betaSequences, plateAtoVMap, plateBtoVMap);
if(verbose){System.out.println("Adjacency matrix made");}
//make bipartite graph
if(verbose){System.out.println("Making bipartite weighted graph");}
//the graph object
SimpleWeightedGraph<Vertex, DefaultWeightedEdge> graph =
new SimpleWeightedGraph<>(DefaultWeightedEdge.class);
//the graph generator
SimpleWeightedBipartiteGraphMatrixGenerator graphGenerator = new SimpleWeightedBipartiteGraphMatrixGenerator();
//the list of alpha vertices
//List<Integer> alphaVertices = new ArrayList<>(plateVtoAMap.keySet()); //This will work because LinkedHashMap preserves order of entry
List<Vertex> alphaVertices = new ArrayList<>();
//start with map of all alphas mapped to vertex values, get occupancy from the alphaWellCounts map
for (Integer seq : plateAtoVMap.keySet()) {
Vertex alphaVertex = new Vertex(SequenceType.CDR3_ALPHA, seq, alphaWellCounts.get(seq), plateAtoVMap.get(seq));
for (String seq : plateAtoVMap.keySet()) {
Vertex alphaVertex = new Vertex(alphaSequences.get(seq), plateAtoVMap.get(seq));
alphaVertices.add(alphaVertex);
}
//Sort to make sure the order of vertices in list matches the order of the adjacency matrix
Collections.sort(alphaVertices);
//Add ordered list of vertices to the graph
graphGenerator.first(alphaVertices);
//the list of beta vertices
//List<Integer> betaVertices = new ArrayList<>(plateVtoBMap.keySet());//This will work because LinkedHashMap preserves order of entry
List<Vertex> betaVertices = new ArrayList<>();
for (Integer seq : plateBtoVMap.keySet()) {
Vertex betaVertex = new Vertex(SequenceType.CDR3_BETA, seq, betaWellCounts.get(seq), plateBtoVMap.get(seq));
for (String seq : plateBtoVMap.keySet()) {
Vertex betaVertex = new Vertex(betaSequences.get(seq), plateBtoVMap.get(seq));
betaVertices.add(betaVertex);
}
//Sort to make sure the order of vertices in list matches the order of the adjacency matrix
Collections.sort(betaVertices);
//Add ordered list of vertices to the graph
graphGenerator.second(betaVertices);
//use adjacency matrix of weight created previously
graphGenerator.weights(weights);
graphGenerator.generateGraph(graph);
if(verbose){System.out.println("Graph created");}
//stop timing
Instant stop = Instant.now();
Duration time = Duration.between(start, stop);
//create GraphWithMapData object
GraphWithMapData output = new GraphWithMapData(graph, numWells, samplePlate.getPopulations(), distCellsMapAlphaKey, time);
//Set source file name in graph to name of sample plate
output.setSourceFilename(samplePlate.getFilename());
GraphWithMapData output = new GraphWithMapData(graph, numWells, samplePlate.getPopulations(), distCellsMapAlphaKey,
alphaCount, betaCount, samplePlate.getError(), readDepth, readErrorRate, errorCollisionRate, realSequenceCollisionRate, time);
//Set cell sample file name in graph to name of cell sample
output.setCellFilename(cellSample.getFilename());
//Set cell sample size in graph
output.setCellSampleSize(cellSample.getCellCount());
//Set sample plate file name in graph to name of sample plate
output.setPlateFilename(samplePlate.getFilename());
//return GraphWithMapData object
return output;
}
@@ -133,7 +142,7 @@ public class Simulator implements GraphModificationFunctions {
//match CDR3s.
public static MatchingResult matchCDR3s(GraphWithMapData data, String dataFilename, Integer lowThreshold,
Integer highThreshold, Integer maxOccupancyDifference,
Integer minOverlapPercent, boolean verbose) {
Integer minOverlapPercent, boolean verbose, boolean calculatePValue) {
Instant start = Instant.now();
SimpleWeightedGraph<Vertex, DefaultWeightedEdge> graph = data.getGraph();
Map<Vertex[], Integer> removedEdges = new HashMap<>();
@@ -141,7 +150,7 @@ public class Simulator implements GraphModificationFunctions {
int numWells = data.getNumWells();
//Integer alphaCount = data.getAlphaCount();
//Integer betaCount = data.getBetaCount();
Map<Integer, Integer> distCellsMapAlphaKey = data.getDistCellsMapAlphaKey();
Map<String, String> distCellsMapAlphaKey = data.getDistCellsMapAlphaKey();
Set<Vertex> alphas = new HashSet<>();
Set<Vertex> betas = new HashSet<>();
for(Vertex v: graph.vertexSet()) {
@@ -152,8 +161,8 @@ public class Simulator implements GraphModificationFunctions {
betas.add(v);
}
}
Integer alphaCount = alphas.size();
Integer betaCount = betas.size();
Integer graphAlphaCount = alphas.size();
Integer graphBetaCount = betas.size();
//remove edges with weights outside given overlap thresholds, add those to removed edge list
if(verbose){System.out.println("Eliminating edges with weights outside overlap threshold values");}
@@ -173,9 +182,9 @@ public class Simulator implements GraphModificationFunctions {
if(verbose){System.out.println("Edges between vertices of with excessively different occupancy values " +
"removed");}
//Find Maximum Weighted Matching
//Find Maximum Weight Matching
//using jheaps library class PairingHeap for improved efficiency
if(verbose){System.out.println("Finding maximum weighted matching");}
if(verbose){System.out.println("Finding maximum weight matching");}
MaximumWeightBipartiteMatching maxWeightMatching;
//Use correct heap type for priority queue
String heapType = BiGpairSEQ.getPriorityQueueHeapType();
@@ -211,7 +220,7 @@ public class Simulator implements GraphModificationFunctions {
header.add("Beta well count");
header.add("Overlap well count");
header.add("Matched correctly?");
header.add("P-value");
if(calculatePValue) { header.add("P-value"); }
//Results for csv file
List<List<String>> allResults = new ArrayList<>();
@@ -222,17 +231,14 @@ public class Simulator implements GraphModificationFunctions {
int trueCount = 0;
int falseCount = 0;
boolean check;
Map<Integer, Integer> matchMap = new HashMap<>();
Map<String, String> matchMap = new HashMap<>();
while(weightIter.hasNext()) {
e = weightIter.next();
Vertex source = graph.getEdgeSource(e);
Vertex target = graph.getEdgeTarget(e);
//Integer source = graph.getEdgeSource(e);
//Integer target = graph.getEdgeTarget(e);
//The match map is all matches found, not just true matches!
matchMap.put(source.getSequence(), target.getSequence());
check = target.getSequence().equals(distCellsMapAlphaKey.get(source.getSequence()));
//check = plateVtoBMap.get(target).equals(distCellsMapAlphaKey.get(plateVtoAMap.get(source)));
if(check) {
trueCount++;
}
@@ -241,26 +247,28 @@ public class Simulator implements GraphModificationFunctions {
}
List<String> result = new ArrayList<>();
//alpha sequence
result.add(source.getSequence().toString());
result.add(source.getSequence());
//alpha well count
result.add(source.getOccupancy().toString());
//beta sequence
result.add(target.getSequence().toString());
result.add(target.getSequence());
//beta well count
result.add(target.getOccupancy().toString());
//overlap count
result.add(Double.toString(graph.getEdgeWeight(e)));
result.add(Boolean.toString(check));
double pValue = Equations.pValue(numWells, source.getOccupancy(),
if (calculatePValue) {
double pValue = Equations.pValue(numWells, source.getOccupancy(),
target.getOccupancy(), graph.getEdgeWeight(e));
BigDecimal pValueTrunc = new BigDecimal(pValue, mc);
result.add(pValueTrunc.toString());
BigDecimal pValueTrunc = new BigDecimal(pValue, mc);
result.add(pValueTrunc.toString());
}
allResults.add(result);
}
//Metadata comments for CSV file
String algoType = "LEDA book with heap: " + heapType;
int min = Math.min(alphaCount, betaCount);
int min = Math.min(graphAlphaCount, graphBetaCount);
//matching weight
BigDecimal totalMatchingWeight = maxWeightMatching.getMatchingWeight();
//rate of attempted matching
@@ -285,29 +293,45 @@ public class Simulator implements GraphModificationFunctions {
populationsStringBuilder.append(wellPopulations[i].toString());
}
String wellPopulationsString = populationsStringBuilder.toString();
//graph generation time
Duration graphTime = data.getTime();
//MWM run time
Duration pairingTime = Duration.between(start, stop);
//total simulation time
Duration time = Duration.between(start, stop);
time = time.plus(data.getTime());
Duration totalTime = graphTime.plus(pairingTime);
Map<String, String> metadata = new LinkedHashMap<>();
metadata.put("sample plate filename", data.getSourceFilename());
metadata.put("cell sample filename", data.getCellFilename());
metadata.put("cell sample size", data.getCellSampleSize().toString());
metadata.put("sample plate filename", data.getPlateFilename());
metadata.put("sample plate well count", data.getNumWells().toString());
metadata.put("sequence dropout rate", data.getDropoutRate().toString());
metadata.put("graph filename", dataFilename);
metadata.put("algorithm type", algoType);
metadata.put("MWM algorithm type", algoType);
metadata.put("matching weight", totalMatchingWeight.toString());
metadata.put("well populations", wellPopulationsString);
metadata.put("total alphas found", alphaCount.toString());
metadata.put("total betas found", betaCount.toString());
metadata.put("high overlap threshold", highThreshold.toString());
metadata.put("low overlap threshold", lowThreshold.toString());
metadata.put("minimum overlap percent", minOverlapPercent.toString());
metadata.put("maximum occupancy difference", maxOccupancyDifference.toString());
metadata.put("sequence read depth", data.getReadDepth().toString());
metadata.put("sequence read error rate", data.getReadErrorRate().toString());
metadata.put("read error collision rate", data.getErrorCollisionRate().toString());
metadata.put("real sequence collision rate", data.getRealSequenceCollisionRate().toString());
metadata.put("total alphas read from plate", data.getAlphaCount().toString());
metadata.put("total betas read from plate", data.getBetaCount().toString());
metadata.put("alphas in graph (after pre-filtering)", graphAlphaCount.toString());
metadata.put("betas in graph (after pre-filtering)", graphBetaCount.toString());
metadata.put("high overlap threshold for pairing", highThreshold.toString());
metadata.put("low overlap threshold for pairing", lowThreshold.toString());
metadata.put("minimum overlap percent for pairing", minOverlapPercent.toString());
metadata.put("maximum occupancy difference for pairing", maxOccupancyDifference.toString());
metadata.put("pairing attempt rate", attemptRateTrunc.toString());
metadata.put("correct pairing count", Integer.toString(trueCount));
metadata.put("incorrect pairing count", Integer.toString(falseCount));
metadata.put("pairing error rate", pairingErrorRateTrunc.toString());
metadata.put("simulation time (seconds)", nf.format(time.toSeconds()));
metadata.put("time to generate graph (seconds)", nf.format(graphTime.toSeconds()));
metadata.put("time to pair sequences (seconds)",nf.format(pairingTime.toSeconds()));
metadata.put("total simulation time (seconds)", nf.format(totalTime.toSeconds()));
//create MatchingResult object
MatchingResult output = new MatchingResult(metadata, header, allResults, matchMap, time);
MatchingResult output = new MatchingResult(metadata, header, allResults, matchMap);
if(verbose){
for(String s: output.getComments()){
System.out.println(s);
@@ -629,81 +653,97 @@ public class Simulator implements GraphModificationFunctions {
// }
//Remove sequences based on occupancy
public static void filterByOccupancyThresholds(Map<Integer, Integer> wellMap, int low, int high){
List<Integer> noise = new ArrayList<>();
for(Integer k: wellMap.keySet()){
if((wellMap.get(k) > high) || (wellMap.get(k) < low)){
private static void filterByOccupancyThresholds(Map<String, SequenceRecord> wellMap, int low, int high){
List<String> noise = new ArrayList<>();
for(String k: wellMap.keySet()){
if((wellMap.get(k).getOccupancy() > high) || (wellMap.get(k).getOccupancy() < low)){
noise.add(k);
}
}
for(Integer k: noise) {
for(String k: noise) {
wellMap.remove(k);
}
}
//Counts the well occupancy of the row peptides and column peptides into given maps, and
//fills weights in the given 2D array
private static void countSequencesAndFillMatrix(Plate samplePlate,
Map<Integer,Integer> allRowSequences,
Map<Integer,Integer> allColumnSequences,
Map<Integer,Integer> rowSequenceToVertexMap,
Map<Integer,Integer> columnSequenceToVertexMap,
int[] rowSequenceIndices,
int[] colSequenceIndices,
Map<Integer, Integer> rowSequenceCounts,
Map<Integer,Integer> columnSequenceCounts,
double[][] weights){
Map<Integer, Integer> wellNRowSequences = null;
Map<Integer, Integer> wellNColumnSequences = null;
int vertexStartValue = rowSequenceToVertexMap.size();
int numWells = samplePlate.getSize();
for (int n = 0; n < numWells; n++) {
wellNRowSequences = samplePlate.assayWellsSequenceS(n, rowSequenceIndices);
for (Integer a : wellNRowSequences.keySet()) {
if(allRowSequences.containsKey(a)){
rowSequenceCounts.merge(a, 1, (oldValue, newValue) -> oldValue + newValue);
}
private static void filterByOccupancyAndReadCount(Map<String, SequenceRecord> sequences, int readDepth) {
List<String> noise = new ArrayList<>();
for(String k : sequences.keySet()){
//the sequence read count should be more than half the occupancy times read depth if the read error rate is low
Integer threshold = (sequences.get(k).getOccupancy() * readDepth) / 2;
if(sequences.get(k).getReadCount() < threshold) {
noise.add(k);
}
wellNColumnSequences = samplePlate.assayWellsSequenceS(n, colSequenceIndices);
for (Integer b : wellNColumnSequences.keySet()) {
if(allColumnSequences.containsKey(b)){
columnSequenceCounts.merge(b, 1, (oldValue, newValue) -> oldValue + newValue);
}
}
for (Integer i : wellNRowSequences.keySet()) {
if(allRowSequences.containsKey(i)){
for (Integer j : wellNColumnSequences.keySet()) {
if(allColumnSequences.containsKey(j)){
weights[rowSequenceToVertexMap.get(i)][columnSequenceToVertexMap.get(j) - vertexStartValue] += 1.0;
}
}
}
}
}
for(String k : noise) {
sequences.remove(k);
}
}
private static Map<Integer, Integer> makeSequenceToSequenceMap(List<Integer[]> cells, int keySequenceIndex,
int valueSequenceIndex){
Map<Integer, Integer> keySequenceToValueSequenceMap = new HashMap<>();
for (Integer[] cell : cells) {
private static int filterWellsByReadCount(Map<String, SequenceRecord> sequences) {
int count = 0;
for (String k: sequences.keySet()) {
//If a sequence has read count R and appears in W wells, then on average its read count in each
//well should be R/W. Delete any wells where the read count is less than R/2W.
Integer threshold = sequences.get(k).getReadCount() / (2 * sequences.get(k).getOccupancy());
List<Integer> noise = new ArrayList<>();
for (Integer well: sequences.get(k).getWells()) {
if (sequences.get(k).getReadCount(well) < threshold) {
noise.add(well);
count++;
}
}
for (Integer well: noise) {
sequences.get(k).deleteWell(well);
}
}
return count;
}
private static Map<String, String> makeSequenceToSequenceMap(List<String[]> cells, int keySequenceIndex,
int valueSequenceIndex){
Map<String, String> keySequenceToValueSequenceMap = new HashMap<>();
for (String[] cell : cells) {
keySequenceToValueSequenceMap.put(cell[keySequenceIndex], cell[valueSequenceIndex]);
}
return keySequenceToValueSequenceMap;
}
private static Map<Integer, Integer> makeVertexToSequenceMap(Map<Integer, Integer> sequences, Integer startValue) {
Map<Integer, Integer> map = new LinkedHashMap<>(); //LinkedHashMap to preserve order of entry
Integer index = startValue; //is this necessary? I don't think I use this.
for (Integer k: sequences.keySet()) {
private static Map<Integer, String> makeVertexToSequenceMap(Map<String, SequenceRecord> sequences, Integer startValue) {
Map<Integer, String> map = new LinkedHashMap<>(); //LinkedHashMap to preserve order of entry
Integer index = startValue;
for (String k: sequences.keySet()) {
map.put(index, k);
index++;
}
return map;
}
private static Map<Integer, Integer> invertVertexMap(Map<Integer, Integer> map) {
Map<Integer, Integer> inverse = new HashMap<>();
private static Map<String, Integer> makeSequenceToVertexMap(Map<String, SequenceRecord> sequences, Integer startValue) {
Map<String, Integer> map = new LinkedHashMap<>(); //LinkedHashMap to preserve order of entry
Integer index = startValue;
for (String k: sequences.keySet()) {
map.put(k, index);
index++;
}
return map;
}
private static void fillAdjacencyMatrix(double[][] weights, Integer vertexOffsetValue, Map<String, SequenceRecord> rowSequences,
Map<String, SequenceRecord> columnSequences, Map<String, Integer> rowToVertexMap,
Map<String, Integer> columnToVertexMap) {
for (String rowSeq: rowSequences.keySet()) {
for (Integer well: rowSequences.get(rowSeq).getWells()) {
for (String colSeq: columnSequences.keySet()) {
if (columnSequences.get(colSeq).isInWell(well)) {
weights[rowToVertexMap.get(rowSeq)][columnToVertexMap.get(colSeq) - vertexOffsetValue] += 1.0;
}
}
}
}
}
private static Map<String, Integer> invertVertexMap(Map<Integer, String> map) {
Map<String, Integer> inverse = new HashMap<>();
for (Integer k : map.keySet()) {
inverse.put(map.get(k), k);
}

View File

@@ -1,63 +1,45 @@
import org.jheaps.AddressableHeap;
import java.io.Serializable;
import java.util.Map;
public class Vertex implements Serializable {
private SequenceType type;
private Integer vertexLabel;
private Integer sequence;
private Integer occupancy;
public class Vertex implements Serializable, Comparable<Vertex> {
private SequenceRecord record;
private Integer vertexLabel;
private Double potential;
private AddressableHeap queue;
public Vertex(Integer vertexLabel) {
public Vertex(SequenceRecord record, Integer vertexLabel) {
this.record = record;
this.vertexLabel = vertexLabel;
}
public Vertex(String vertexLabel) {
this.vertexLabel = Integer.parseInt((vertexLabel));
}
public Vertex(SequenceType type, Integer sequence, Integer occupancy, Integer vertexLabel) {
this.type = type;
this.vertexLabel = vertexLabel;
this.sequence = sequence;
this.occupancy = occupancy;
}
public SequenceRecord getRecord() { return record; }
public SequenceType getType() {
return type;
}
public void setType(String type) {
this.type = SequenceType.valueOf(type);
}
public SequenceType getType() { return record.getSequenceType(); }
public Integer getVertexLabel() {
return vertexLabel;
}
public void setVertexLabel(String label) {
this.vertexLabel = Integer.parseInt(label);
}
public Integer getSequence() {
return sequence;
}
public void setSequence(String sequence) {
this.sequence = Integer.parseInt(sequence);
public String getSequence() {
return record.getSequence();
}
public Integer getOccupancy() {
return occupancy;
return record.getOccupancy();
}
public void setOccupancy(String occupancy) {
this.occupancy = Integer.parseInt(occupancy);
}
public Integer getReadCount() { return record.getReadCount(); }
public Integer getReadCount(Integer well) { return record.getReadCount(well); }
public Map<Integer, Integer> getWellOccupancies() { return record.getWellOccupancies(); }
@Override //adapted from JGraphT example code
public int hashCode()
{
return (sequence == null) ? 0 : sequence.hashCode();
return (this.getSequence() == null) ? 0 : this.getSequence().hashCode();
}
@Override //adapted from JGraphT example code
@@ -70,23 +52,26 @@ public class Vertex implements Serializable {
if (getClass() != obj.getClass())
return false;
Vertex other = (Vertex) obj;
if (sequence == null) {
return other.sequence == null;
if (this.getSequence() == null) {
return other.getSequence() == null;
} else {
return sequence.equals(other.sequence);
return this.getSequence().equals(other.getSequence());
}
}
@Override //adapted from JGraphT example code
public String toString()
{
StringBuilder sb = new StringBuilder();
sb.append("(").append(vertexLabel)
.append(", Type: ").append(type.name())
.append(", Sequence: ").append(sequence)
.append(", Occupancy: ").append(occupancy).append(")");
.append(", Type: ").append(this.getType().name())
.append(", Sequence: ").append(this.getSequence())
.append(", Occupancy: ").append(this.getOccupancy()).append(")");
return sb.toString();
}
@Override
public int compareTo(Vertex other) {
return this.vertexLabel - other.getVertexLabel();
}
}