56 Commits
v4.2 ... master

Author SHA1 Message Date
eugenefischer
4099ec2623 update ToDos 2025-04-15 15:50:55 -05:00
eugenefischer
7744586e79 change frequency of garbage collection requests 2025-04-10 20:07:34 -05:00
eugenefischer
83eff0d1e7 remove output to stdout that was added for testing 2025-04-10 15:08:33 -05:00
eugenefischer
d1810c453d Even more efficient graph creation (my initial scheme, but this time without accidentally changing what's in the sequence records) 2025-04-10 15:03:10 -05:00
eugenefischer
187401f2d6 More efficient graph creation 2025-04-10 14:06:11 -05:00
eugenefischer
678ce99424 iterate over vertex wells correctly 2025-04-10 13:34:04 -05:00
eugenefischer
c21e375303 fix concurrent modification bug 2025-04-10 13:33:47 -05:00
eugenefischer
57fe9c1619 Update graph modification functions to work with edges directly 2025-04-10 12:42:19 -05:00
eugenefischer
e1888a99c6 refactor to construct the bipartite graph directly, rather than by using an adjacency matrix and a graph generator. 2025-04-10 11:47:15 -05:00
eugenefischer
bcf5a4c749 change artifact details 2025-04-10 11:05:08 -05:00
eugenefischer
81d8a12765 dependency update stuff 2025-04-10 10:54:05 -05:00
eugenefischer
b5c0568e22 Add dependencies 2025-04-10 10:53:42 -05:00
eugenefischer
b7597cff2a update readme and add Zipf exponent option to CLI 2025-04-09 16:16:46 -05:00
eugenefischer
7bbeaf7dad update readme 2025-04-09 14:40:49 -05:00
eugenefischer
945b967382 update readme 2025-04-09 14:39:46 -05:00
eugenefischer
a43ee469ea implement Zipf distribution 2025-04-09 14:32:02 -05:00
eugenefischer
161a52aa89 update readme 2025-04-09 11:52:03 -05:00
eugenefischer
9b2ad9da09 update readme 2025-04-09 11:42:10 -05:00
eugenefischer
30a3f6e33d update citations 2025-04-09 11:36:06 -05:00
eugenefischer
8cc1f19da1 update links 2025-04-09 11:31:05 -05:00
eugenefischer
3efa5c26d8 fix index link 2025-04-09 11:22:13 -05:00
eugenefischer
e686d4957b disable selection of the scaling integer weight MWM algorithm via the interactive interface 2025-04-09 11:20:52 -05:00
eugenefischer
fbc0496675 update readme and default heap type 2025-04-09 11:18:21 -05:00
eugenefischer
0071cafbbd Rough implementation, missing final dual adjustment step, and may have other bugs as well as it does not yet output a maximum weight matching 2025-04-09 10:17:13 -05:00
eugenefischer
3d302cf8ad initial commit of stub of integer weight scaling algorithm 2025-03-27 13:42:27 -05:00
eugenefischer
5f5d77b0a4 update citations 2023-04-09 20:59:09 -05:00
eugenefischer
af32be85ee update TODO 2023-04-09 20:49:39 -05:00
eugenefischer
58cdf9ae93 Lookback AA implementation, doesn't currently work 2023-04-09 20:45:03 -05:00
eugenefischer
202ad4c834 mention forward/reverse auction algorithms 2023-04-09 20:42:58 -05:00
eugenefischer
96d49d0034 clarifying comment 2023-04-09 19:48:43 -05:00
eugenefischer
d8e5f7ece0 update todo 2023-04-09 13:00:41 -05:00
eugenefischer
9c81d919b4 add disclosure section 2023-01-18 16:28:16 -06:00
eugenefischer
70b08e7c22 Bugfixes and streamlining 2022-10-22 17:59:01 -05:00
eugenefischer
44158d264c Correct sequence count 2022-10-22 16:16:32 -05:00
eugenefischer
e97c2989db Add dropout rate calculation to read-in of data from plate file (this may slow down read-in by a lot) 2022-10-22 16:04:41 -05:00
eugenefischer
f7709ada73 Change order of metadata comments 2022-10-22 15:50:35 -05:00
eugenefischer
25b37eff48 renamed to MaximumIntegerWeightBipartiteAuctionMatching 2022-10-22 15:00:22 -05:00
eugenefischer
fbbb5a8792 Update comments 2022-10-22 14:59:43 -05:00
eugenefischer
4b9d7f8494 Add option to select matching algorithm type, rename types in output 2022-10-22 14:59:24 -05:00
eugenefischer
0de12a3a12 Refactor to use selected algorithm type 2022-10-22 14:58:40 -05:00
eugenefischer
3c2ec9002e Add field for algorithm type, methods to set algorithm type 2022-10-22 14:13:31 -05:00
eugenefischer
bcf3af5a83 Update algorithm type names 2022-10-22 14:10:00 -05:00
eugenefischer
fcca22a2f0 Rename class, modify bidding to include marginal item value 2022-10-22 13:18:43 -05:00
eugenefischer
910de0ce9d Fix typos 2022-10-21 13:46:10 -05:00
eugenefischer
ef349ea5f6 Correctly store matching weight 2022-10-14 18:44:56 -05:00
eugenefischer
174db66c46 Clean up comments 2022-10-14 18:31:32 -05:00
eugenefischer
b3273855a6 Test simpler source/target differentiation 2022-10-14 18:11:21 -05:00
eugenefischer
51c1bc2551 Skip edges with zero weight 2022-10-14 18:09:34 -05:00
eugenefischer
f7d522e95d Comment out old MWM algorithm, add auction algorithm 2022-10-14 17:38:07 -05:00
eugenefischer
5f0c089b0a add getter for matchingWeight 2022-10-14 17:37:40 -05:00
eugenefischer
d3066095d9 add getter/setter for potential 2022-10-14 17:32:37 -05:00
eugenefischer
55a5d9a892 Making fields final 2022-10-14 17:32:21 -05:00
eugenefischer
49708f2f8a Initial auction algorithm implementation 2022-10-14 17:31:59 -05:00
eugenefischer
c7934ca498 update TODO 2022-10-03 21:30:32 -05:00
eugenefischer
8f0ed91cb7 revert previous commit 2022-10-01 18:36:41 -05:00
eugenefischer
40bc2ce88d New linking test 2022-10-01 18:35:58 -05:00
23 changed files with 2276 additions and 311 deletions

1
.idea/.name generated Normal file
View File

@@ -0,0 +1 @@
BiGpairSEQ

View File

@@ -1,16 +1,27 @@
<component name="ArtifactManager">
<artifact type="jar" build-on-make="true" name="BiGpairSEQ_Sim:jar">
<artifact type="jar" name="BiGpairSEQ_Sim:jar">
<output-path>$PROJECT_DIR$/out/artifacts/BiGpairSEQ_Sim_jar</output-path>
<root id="archive" name="BiGpairSEQ_Sim.jar">
<element id="directory" name="META-INF">
<element id="file-copy" path="$PROJECT_DIR$/src/main/java/META-INF/MANIFEST.MF" />
<element id="file-copy" path="$PROJECT_DIR$/META-INF/MANIFEST.MF" />
</element>
<element id="module-output" name="BigPairSEQ" />
<element id="extracted-dir" path="$MAVEN_REPOSITORY$/org/jgrapht/jgrapht-core/1.5.1/jgrapht-core-1.5.1.jar" path-in-jar="/" />
<element id="extracted-dir" path="$MAVEN_REPOSITORY$/org/jheaps/jheaps/0.13/jheaps-0.13.jar" path-in-jar="/" />
<element id="extracted-dir" path="$MAVEN_REPOSITORY$/commons-cli/commons-cli/1.5.0/commons-cli-1.5.0.jar" path-in-jar="/" />
<element id="extracted-dir" path="$MAVEN_REPOSITORY$/org/apache/commons/commons-csv/1.9.0/commons-csv-1.9.0.jar" path-in-jar="/" />
<element id="extracted-dir" path="$MAVEN_REPOSITORY$/org/jetbrains/annotations/23.0.0/annotations-23.0.0.jar" path-in-jar="/" />
<element id="module-output" name="BiGpairSEQ_Sim" />
<element id="extracted-dir" path="$MAVEN_REPOSITORY$/org/jgrapht/jgrapht-core/1.5.2/jgrapht-core-1.5.2.jar" path-in-jar="/" />
<element id="extracted-dir" path="$MAVEN_REPOSITORY$/org/apache/commons/commons-rng-sampling/1.6/commons-rng-sampling-1.6.jar" path-in-jar="/" />
<element id="extracted-dir" path="$MAVEN_REPOSITORY$/org/apache/commons/commons-csv/1.14.0/commons-csv-1.14.0.jar" path-in-jar="/" />
<element id="extracted-dir" path="$MAVEN_REPOSITORY$/org/jetbrains/annotations/26.0.2/annotations-26.0.2.jar" path-in-jar="/" />
<element id="extracted-dir" path="$MAVEN_REPOSITORY$/org/jgrapht/jgrapht-io/1.5.2/jgrapht-io-1.5.2.jar" path-in-jar="/" />
<element id="extracted-dir" path="$MAVEN_REPOSITORY$/org/apache/commons/commons-rng-simple/1.6/commons-rng-simple-1.6.jar" path-in-jar="/" />
<element id="extracted-dir" path="$MAVEN_REPOSITORY$/commons-io/commons-io/2.18.0/commons-io-2.18.0.jar" path-in-jar="/" />
<element id="extracted-dir" path="$MAVEN_REPOSITORY$/org/apache/commons/commons-rng-core/1.6/commons-rng-core-1.6.jar" path-in-jar="/" />
<element id="extracted-dir" path="$MAVEN_REPOSITORY$/commons-codec/commons-codec/1.18.0/commons-codec-1.18.0.jar" path-in-jar="/" />
<element id="extracted-dir" path="$MAVEN_REPOSITORY$/org/apache/commons/commons-rng-client-api/1.6/commons-rng-client-api-1.6.jar" path-in-jar="/" />
<element id="extracted-dir" path="$MAVEN_REPOSITORY$/commons-cli/commons-cli/1.9.0/commons-cli-1.9.0.jar" path-in-jar="/" />
<element id="extracted-dir" path="$MAVEN_REPOSITORY$/org/apache/commons/commons-lang3/3.12.0/commons-lang3-3.12.0.jar" path-in-jar="/" />
<element id="extracted-dir" path="$MAVEN_REPOSITORY$/org/antlr/antlr4-runtime/4.12.0/antlr4-runtime-4.12.0.jar" path-in-jar="/" />
<element id="extracted-dir" path="$MAVEN_REPOSITORY$/org/apfloat/apfloat/1.10.1/apfloat-1.10.1.jar" path-in-jar="/" />
<element id="extracted-dir" path="$MAVEN_REPOSITORY$/org/apache/commons/commons-text/1.10.0/commons-text-1.10.0.jar" path-in-jar="/" />
<element id="extracted-dir" path="$MAVEN_REPOSITORY$/org/jheaps/jheaps/0.14/jheaps-0.14.jar" path-in-jar="/" />
</root>
</artifact>
</component>

1
.idea/compiler.xml generated
View File

@@ -7,6 +7,7 @@
<sourceTestOutputDir name="target/generated-test-sources/test-annotations" />
<outputRelativeToContentRoot value="true" />
<module name="BigPairSEQ" />
<module name="BiGpairSEQ_Sim" />
</profile>
</annotationProcessing>
</component>

View File

@@ -1,20 +1,35 @@
<?xml version="1.0" encoding="UTF-8"?>
<project version="4">
<component name="RemoteRepositoriesConfiguration">
<remote-repository>
<option name="id" value="my-internal-site" />
<option name="name" value="my-internal-site" />
<option name="url" value="https://myserver/repo" />
</remote-repository>
<remote-repository>
<option name="id" value="central" />
<option name="name" value="Central Repository" />
<option name="url" value="https://repo1.maven.org/maven2" />
</remote-repository>
<remote-repository>
<option name="id" value="central repo" />
<option name="name" value="central repo" />
<option name="url" value="https://repo1.maven.org/maven2/" />
</remote-repository>
<remote-repository>
<option name="id" value="central" />
<option name="name" value="Central Repository" />
<option name="url" value="https://repo.maven.apache.org/maven2" />
</remote-repository>
<remote-repository>
<option name="id" value="central" />
<option name="name" value="Maven Central repository" />
<option name="url" value="https://repo1.maven.org/maven2" />
</remote-repository>
<remote-repository>
<option name="id" value="jboss.community" />
<option name="name" value="JBoss Community repository" />
<option name="url" value="https://repository.jboss.org/nexus/content/repositories/public/" />
</remote-repository>
<remote-repository>
<option name="id" value="34d16bdc-85f0-48ee-8e8b-144091765be1" />
<option name="name" value="34d16bdc-85f0-48ee-8e8b-144091765be1" />
<option name="url" value="https://repository.mulesoft.org/nexus/content/repositories/public/" />
</remote-repository>
</component>
</project>

View File

@@ -1,8 +1,10 @@
<component name="libraryTable">
<library name="apache.commons.csv" type="repository">
<properties maven-id="org.apache.commons:commons-csv:1.9.0" />
<properties maven-id="org.apache.commons:commons-csv:1.14.0" />
<CLASSES>
<root url="jar://$MAVEN_REPOSITORY$/org/apache/commons/commons-csv/1.9.0/commons-csv-1.9.0.jar!/" />
<root url="jar://$MAVEN_REPOSITORY$/org/apache/commons/commons-csv/1.14.0/commons-csv-1.14.0.jar!/" />
<root url="jar://$MAVEN_REPOSITORY$/commons-io/commons-io/2.18.0/commons-io-2.18.0.jar!/" />
<root url="jar://$MAVEN_REPOSITORY$/commons-codec/commons-codec/1.18.0/commons-codec-1.18.0.jar!/" />
</CLASSES>
<JAVADOC />
<SOURCES />

View File

@@ -1,8 +1,8 @@
<component name="libraryTable">
<library name="commons.cli" type="repository">
<properties maven-id="commons-cli:commons-cli:1.5.0" />
<properties maven-id="commons-cli:commons-cli:1.9.0" />
<CLASSES>
<root url="jar://$MAVEN_REPOSITORY$/commons-cli/commons-cli/1.5.0/commons-cli-1.5.0.jar!/" />
<root url="jar://$MAVEN_REPOSITORY$/commons-cli/commons-cli/1.9.0/commons-cli-1.9.0.jar!/" />
</CLASSES>
<JAVADOC />
<SOURCES />

View File

@@ -1,9 +1,10 @@
<component name="libraryTable">
<library name="jgrapht.core" type="repository">
<properties maven-id="org.jgrapht:jgrapht-core:1.5.1" />
<properties maven-id="org.jgrapht:jgrapht-core:1.5.2" />
<CLASSES>
<root url="jar://$MAVEN_REPOSITORY$/org/jgrapht/jgrapht-core/1.5.1/jgrapht-core-1.5.1.jar!/" />
<root url="jar://$MAVEN_REPOSITORY$/org/jheaps/jheaps/0.13/jheaps-0.13.jar!/" />
<root url="jar://$MAVEN_REPOSITORY$/org/jgrapht/jgrapht-core/1.5.2/jgrapht-core-1.5.2.jar!/" />
<root url="jar://$MAVEN_REPOSITORY$/org/jheaps/jheaps/0.14/jheaps-0.14.jar!/" />
<root url="jar://$MAVEN_REPOSITORY$/org/apfloat/apfloat/1.10.1/apfloat-1.10.1.jar!/" />
</CLASSES>
<JAVADOC />
<SOURCES />

View File

@@ -1,13 +1,14 @@
<component name="libraryTable">
<library name="jgrapht.io" type="repository">
<properties maven-id="org.jgrapht:jgrapht-io:1.5.1" />
<properties maven-id="org.jgrapht:jgrapht-io:1.5.2" />
<CLASSES>
<root url="jar://$MAVEN_REPOSITORY$/org/jgrapht/jgrapht-io/1.5.1/jgrapht-io-1.5.1.jar!/" />
<root url="jar://$MAVEN_REPOSITORY$/org/jgrapht/jgrapht-core/1.5.1/jgrapht-core-1.5.1.jar!/" />
<root url="jar://$MAVEN_REPOSITORY$/org/jheaps/jheaps/0.13/jheaps-0.13.jar!/" />
<root url="jar://$MAVEN_REPOSITORY$/org/antlr/antlr4-runtime/4.8-1/antlr4-runtime-4.8-1.jar!/" />
<root url="jar://$MAVEN_REPOSITORY$/org/apache/commons/commons-text/1.8/commons-text-1.8.jar!/" />
<root url="jar://$MAVEN_REPOSITORY$/org/apache/commons/commons-lang3/3.9/commons-lang3-3.9.jar!/" />
<root url="jar://$MAVEN_REPOSITORY$/org/jgrapht/jgrapht-io/1.5.2/jgrapht-io-1.5.2.jar!/" />
<root url="jar://$MAVEN_REPOSITORY$/org/jgrapht/jgrapht-core/1.5.2/jgrapht-core-1.5.2.jar!/" />
<root url="jar://$MAVEN_REPOSITORY$/org/jheaps/jheaps/0.14/jheaps-0.14.jar!/" />
<root url="jar://$MAVEN_REPOSITORY$/org/apfloat/apfloat/1.10.1/apfloat-1.10.1.jar!/" />
<root url="jar://$MAVEN_REPOSITORY$/org/antlr/antlr4-runtime/4.12.0/antlr4-runtime-4.12.0.jar!/" />
<root url="jar://$MAVEN_REPOSITORY$/org/apache/commons/commons-text/1.10.0/commons-text-1.10.0.jar!/" />
<root url="jar://$MAVEN_REPOSITORY$/org/apache/commons/commons-lang3/3.12.0/commons-lang3-3.12.0.jar!/" />
</CLASSES>
<JAVADOC />
<SOURCES />

44
pom.xml
View File

@@ -5,7 +5,7 @@
<modelVersion>4.0.0</modelVersion>
<groupId>org.example</groupId>
<artifactId>TCellSim</artifactId>
<artifactId>BiGpairSEQ_Sim</artifactId>
<version>1.0-SNAPSHOT</version>
<build>
<plugins>
@@ -26,8 +26,48 @@
<version>RELEASE</version>
<scope>compile</scope>
</dependency>
<!-- https://mvnrepository.com/artifact/org.apache.commons/commons-rng-simple -->
<dependency>
<groupId>org.apache.commons</groupId>
<artifactId>commons-rng-simple</artifactId>
<version>1.6</version>
</dependency>
<dependency>
<groupId>org.apache.commons</groupId>
<artifactId>commons-rng-sampling</artifactId>
<version>1.6</version>
</dependency>
<!-- https://mvnrepository.com/artifact/org.apache.commons/commons-csv -->
<dependency>
<groupId>org.apache.commons</groupId>
<artifactId>commons-csv</artifactId>
<version>1.14.0</version>
</dependency>
<!-- https://mvnrepository.com/artifact/org.jgrapht/jgrapht-core -->
<dependency>
<groupId>org.jgrapht</groupId>
<artifactId>jgrapht-core</artifactId>
<version>1.5.2</version>
</dependency>
<!-- https://mvnrepository.com/artifact/org.jgrapht/jgrapht-io -->
<dependency>
<groupId>org.jgrapht</groupId>
<artifactId>jgrapht-io</artifactId>
<version>1.5.2</version>
</dependency>
<!-- https://mvnrepository.com/artifact/org.jheaps/jheaps -->
<dependency>
<groupId>org.jheaps</groupId>
<artifactId>jheaps</artifactId>
<version>0.14</version>
</dependency>
<!-- https://mvnrepository.com/artifact/commons-cli/commons-cli -->
<dependency>
<groupId>commons-cli</groupId>
<artifactId>commons-cli</artifactId>
<version>1.9.0</version>
</dependency>
</dependencies>
<properties>
<maven.compiler.source>11</maven.compiler.source>
<maven.compiler.target>11</maven.compiler.target>

214
readme.md
View File

@@ -1,30 +1,34 @@
# BiGpairSEQ SIMULATOR
## CONTENTS
1. ABOUT
2. THEORY
3. THE BiGpairSEQ ALGORITHM
4. USAGE
1. RUNNING THE PROGRAM
2. COMMAND LINE OPTIONS
3. INTERACTIVE INTERFACE
4. INPUT/OUTPUT
1. Cell Sample Files
2. Sample Plate Files
3. Graph/Data Files
4. Matching Results Files
5. RESULTS
1. SAMPLE PLATES WITH VARYING NUMBERS OF CELLS PER WELL
2. SIMULATING EXPERIMENTS FROM pairSEQ PAPER
6. TODO
7. CITATIONS
8. ACKNOWLEDGEMENTS
9. AUTHOR
1. [ABOUT](#about)
2. [THEORY](#theory)
3. [THE BiGpairSEQ ALGORITHM](#the-bigpairseq-algorithm)
4. [USAGE](#usage)
1. [RUNNING THE PROGRAM](#running-the-program)
2. [COMMAND LINE OPTIONS](#command-line-options)
3. [INTERACTIVE INTERFACE](#interactive-interface)
4. [INPUT/OUTPUT](#input-output)
1. [Cell Sample Files](#cell-sample-files)
2. [Sample Plate Files](#sample-plate-files)
3. [Graph/Data Files](#graph-data-files)
4. [Matching Results Files](#matching-results-files)
5. [RESULTS](#results)
1. [SAMPLE PLATES WITH VARYING NUMBERS OF CELLS PER WELL](#sample-plates-with-varying-numbers-of-cells-per-well)
2. [SIMULATING EXPERIMENTS FROM THE 2015 pairSEQ PAPER](#simulating-experiments-from-the-2015-pairseq-paper)
1. [EXPERIMENT 1](#experiment-1)
2. [EXPERIMENT 3](#experiment-3)
6. [CITATIONS](#citations)
7. [EXTERNAL LIBRARIES USED](#external-libraries-used)
8. [ACKNOWLEDGEMENTS](#acknowledgements)
9. [AUTHOR](#author)
10. [DISCLOSURE](#disclosure)
11. [TODO](#todo)
## ABOUT
This program simulates BiGpairSEQ (Bipartite Graph pairSEQ), a graph theory-based adaptation
of the pairSEQ algorithm (Howie, et al. 2015) for pairing T cell receptor sequences.
of the pairSEQ algorithm ([Howie, et al. 2015](#citations)) for pairing T cell receptor sequences.
## THEORY
@@ -50,21 +54,17 @@ matching (MWM) on a bipartite graph--the subset of vertex-disjoint edges whose w
This is a well-studied combinatorial optimization problem, with many known algorithms that produce
provably-optimal solutions. The most theoretically efficient algorithm known to the author for maximum weight matching of a bipartite
graph with strictly integral weights is from Duan and Su (2012). For a graph with m edges, n vertices per side,
graph with strictly integral weights is from [Duan and Su (2012)](#citations). For a graph with m edges, n vertices per side,
and maximum integer edge weight N, their algorithm runs in **O(m sqrt(n) log(N))** time. As the graph representation of
a pairSEQ experiment is bipartite with integer weights, this algorithm seems ideal for BiGpairSEQ. Unfortunately, it's a
fairly new algorithm, and not yet implemented by the graph theory library used in this simulator (JGraphT), nor has the author had
time to implement it himself.
There have been some studies which show that [auction algorithms](https://en.wikipedia.org/wiki/Auction_algorithm) for the assignment problem can have superior performance in
real-world implementations, due to their simplicity, than more complex algorithms with better theoretical asymptotic
performance. But, again, there are no such algorithms implemented by JGraphT, nor has the author yet had time to implement one.
a pairSEQ experiment is bipartite with integer weights, this algorithm seems ideal for BiGpairSEQ. Unfortunately, it is not
implemented by the graph theory library used in this simulator (JGraphT), and the author has not yet had time to write a
full, optimized implementation himself for testing.
So this program instead uses the [Fibonacci heap](https://en.wikipedia.org/wiki/Fibonacci_heap) based algorithm of Fredman and Tarjan (1987) (essentially
[the Hungarian algorithm](https://en.wikipedia.org/wiki/Hungarian_algorithm) augmented with a more efficeint priority queue) which has a worst-case
runtime of **O(n (n log(n) + m))**. The algorithm is implemented as described in Mehlhorn and Näher (1999). (The simulator
allows the substitution of a [pairing heap](https://en.wikipedia.org/wiki/Pairing_heap) for a Fibonacci heap, though the relative performance difference of the two
has not yet been thoroughly tested.)
[the Hungarian algorithm](https://en.wikipedia.org/wiki/Hungarian_algorithm) augmented with a more efficient priority queue) which has a worst-case
runtime of **O(n (n log(n) + m))**. The algorithm is implemented as described in [Mehlhorn and Näher (1999)](#citations). (The simulator can use either a
Fibonacci heap or a [pairing heap](https://en.wikipedia.org/wiki/Pairing_heap) as desired. By default, a pairing heap is used,
as in practice they often offer superior performance.)
One possible advantage of this less efficient algorithm is that the Hungarian algorithm and its variations work with both the balanced and the unbalanced assignment problem
(that is, cases where both sides of the bipartite graph have the same number of vertices and those in which they don't.)
@@ -74,6 +74,15 @@ be balanced assignment problems, in practice sequence dropout can cause them to
the Hungarian algorithm, graph doubling--which could be challenging with the computational resources available to the
author--has not yet been necessary.
There have been some studies which show that [auction algorithms](https://en.wikipedia.org/wiki/Auction_algorithm) for the assignment problem can have superior performance in
real-world implementations, due to their simplicity, compared to more complex algorithms with better theoretical asymptotic
performance. The author has implemented a basic forward auction algorithm, which produces optimal assignment for unbalanced bipartite graphs with
integer weights. To allow for unbalanced assignment, this algorithm eschews epsilon-scaling,
and as a result is prone to "bidding-wars" which increase run time, making it less efficient than the implementation of
the Fredman-Tarjan algorithm in JGraphT. A forward/reverse auction algorithm as developed by Bertsekas and Castañon
should be able to handle unbalanced (or, as they call it, asymmetric) assignment much more efficiently, but has yet to be
implemented.
The relative time/space efficiencies of BiGpairSEQ when backed by different MWM algorithms remains an open problem.
## THE BiGpairSEQ ALGORITHM
@@ -127,7 +136,7 @@ There are a number of command line options, to allow the program to be used in s
`java -jar BiGpairSEQ_Sim.jar -help`
```
usage: BiGpairSEQ_Sim.jar
usage: BiGpairSEQ_Sim.jar
-cells,--make-cells Makes a cell sample file of distinct T cells
-graph,--make-graph Makes a graph/data file. Requires a cell sample
file and a sample plate file
@@ -147,6 +156,8 @@ usage: BiGpairSEQ_Sim.jar -plate
-c,--cell-file <filename> The cell sample file to use
-d,--dropout-rate <rate> The sequence dropout rate due to
amplification error. (0.0 - 1.0)
-exp <value> If using -zipf flag, exponent value for
distribution
-exponential Use an exponential distribution for cell
sample
-gaussian Use a Gaussian distribution for cell sample
@@ -164,6 +175,7 @@ usage: BiGpairSEQ_Sim.jar -plate
-stddev <value> If using -gaussian flag, standard deviation
for distribution
-w,--wells <number> The number of wells on the sample plate
-zipf Use a Zipf distribution for cell sample
usage: BiGpairSEQ_Sim.jar -graph
-c,--cell-file <filename> Cell sample file to use for
@@ -225,7 +237,6 @@ usage: BiGpairSEQ_Sim.jar -match
to stdout.
-pv,--p-value (Optional) Calculate p-values for sequence
pairs.
```
### INTERACTIVE INTERFACE
@@ -331,6 +342,8 @@ Options when making a Sample Plate file:
* Standard deviation size
* Exponential
* Lambda value
* Zipf
* Exponent value
* Total number of wells on the plate
* Well populations random or fixed
* If random, minimum and maximum population sizes
@@ -474,8 +487,9 @@ Several BiGpairSEQ simulations were performed on a home computer with the follow
* Linux Mint 21 (5.15 kernel)
### SAMPLE PLATES WITH VARYING NUMBERS OF CELLS PER WELL
NOTE: these results were obtained with an earlier version of BiGpairSEQ_Sim, and should be re-run with the current version.
However, the observed behavior is not expected to change.
The probability calculations used by pairSEQ require that every well on the sample plate contain the same number of T cells.
BiGpairSEQ does not share this limitation; it is robust to variations in the number of cells per well.
A series of BiGpairSEQ simulations were conducted using a cell sample file of 3.5 million unique T cells. From these cells,
10 sample plate files were created. All of these sample plates had 96 wells, used an exponential distribution with a lambda of 0.6, and
@@ -492,6 +506,9 @@ The well populations of the plates were:
All BiGpairSEQ simulations were run with a low overlap threshold of 3 and a high overlap threshold of 94.
No optional filters were used, so pairing was attempted for all sequences with overlaps within the threshold values.
NOTE: these results were obtained with an earlier version of BiGpairSEQ_Sim, and should be re-run with the current version.
However, the observed behavior is not expected to change.
Constant well population plate results:
| |1000 Cell/Well Plate|2000 Cell/Well Plate|3000 Cell/Well Plate|4000 Cell/Well Plate|5000 Cell/Well Plate
@@ -583,64 +600,14 @@ pairs called in the pairSEQ experiment. These results show that at very high sam
underlying frequency distribution drastically affect the results. The real distribution clearly has a much longer "tail"
than the simulated exponential distribution. Implementing a way to exert finer control over the sampling distribution from
the file of distinct cells may enable better simulated replication of this experiment.
## TODO
* ~~Try invoking GC at end of workloads to reduce paging to disk~~ DONE
* ~~Hold graph data in memory until another graph is read-in? ABANDONED UNABANDONED~~ DONE
* ~~*No, this won't work, because BiGpairSEQ simulations alter the underlying graph based on filtering constraints. Changes would cascade with multiple experiments.*~~
* Might have figured out a way to do it, by taking edges out and then putting them back into the graph. This may actually be possible.
* It is possible, though the modifications to the graph incur their own performance penalties. Need testing to see which option is best. It may be computer-specific.
* ~~Test whether pairing heap (currently used) or Fibonacci heap is more efficient for priority queue in current matching algorithm~~ DONE
* ~~in theory Fibonacci heap should be more efficient, but complexity overhead may eliminate theoretical advantage~~
* ~~Add controllable heap-type parameter?~~
* Parameter implemented. Fibonacci heap the current default.
* ~~Implement sample plates with random numbers of T cells per well.~~ DONE
* Possible BiGpairSEQ advantage over pairSEQ: BiGpairSEQ is resilient to variations in well population sizes on a sample plate; pairSEQ is not due to nature of probability calculations.
* preliminary data suggests that BiGpairSEQ behaves roughly as though the whole plate had whatever the *average* well concentration is, but that's still speculative.
* ~~See if there's a reasonable way to reformat Sample Plate files so that wells are columns instead of rows.~~
* ~~Problem is variable number of cells in a well~~
* ~~Apache Commons CSV library writes entries a row at a time~~
* Got this working, but at the cost of a profoundly strange bug in graph occupancy filtering. Have reverted the repo until I can figure out what caused that. Given how easily the tidyverse transposes CSV matrices in R, might not even be worth fixing.
* ~~Enable GraphML output in addition to serialized object binaries, for data portability~~ DONE
* ~~Have a branch where this is implemented, but there's a bug that broke matching. Don't currently have time to fix.~~
* ~~Re-implement command line arguments, to enable scripting and statistical simulation studies~~ DONE
* ~~Implement custom Vertex class to simplify code and make it easier to implement different MWM algorithms~~ DONE
* Advantage: would eliminate the need to use maps to associate vertices with sequences, which would make the code easier to understand.
* This also seems to be faster when using the same algorithm than the version with lots of maps, which is a nice bonus!
* ~~Implement simulation of read depth, and of read errors. Pre-filter graph for difference in read count to eliminate spurious sequences.~~ DONE
* Pre-filtering based on comparing (read depth) * (occupancy) to (read count) for each sequence works extremely well
* ~~Add read depth simulation options to CLI~~ DONE
* ~~Update graphml output to reflect current Vertex class attributes~~ DONE
* Individual well data from the SequenceRecords could be included, if there's ever a reason for it
* ~~Implement simulation of sequences being misread as other real sequence~~ DONE
* Update matching metadata output options in CLI
* Add frequency distribution details to metadata output
* need to make an enum for the different distribution types and refactor the Plate class and user interfaces, also add the necessary fields to GraphWithMapData and then call if from Simulator
* Update performance data in this readme
* Add section to ReadMe describing data filtering methods.
* Re-implement CDR1 matching method
* Refactor simulator code to collect all needed data in a single scan of the plate
* Currently it scans once for the vertices and then again for the edge weights. This made simulating read depth awkward, and incompatible with caching of plate files.
* This would be a fairly major rewrite of the simulator code, but could make things faster, and would definitely make them cleaner.
* Implement Duan and Su's maximum weight matching algorithm
* Add controllable algorithm-type parameter?
* This would be fun and valuable, but probably take more time than I have for a hobby project.
* Implement an auction algorithm for maximum weight matching
* Implement an algorithm for approximating a maximum weight matching
* Some of these run in linear or near-linear time
* given that the underlying biological samples have many, many sources of error, this would probably be the most useful option in practice. It seems less mathematically elegant, though, and so less fun for me.
* Implement Vose's alias method for arbitrary statistical distributions of cells
* Should probably refactor to use apache commons rng for this
* Use commons JCS for caching
* Parameterize pre-filtering options
## CITATIONS
* Howie, B., Sherwood, A. M., et al. ["High-throughput pairing of T cell receptor alpha and beta sequences."](https://pubmed.ncbi.nlm.nih.gov/26290413/) Sci. Transl. Med. 7, 301ra131 (2015)
* Duan, R., Su H. ["A Scaling Algorithm for Maximum Weight Matching in Bipartite Graphs."](https://web.eecs.umich.edu/~pettie/matching/Duan-Su-scaling-bipartite-matching.pdf) Proceedings of the Twenty-Third Annual ACM-SIAM Symposium on Discrete Algorithms, p. 1413-1424. (2012)
* Mehlhorn, K., Näher, St. [The LEDA Platform of Combinatorial and Geometric Computing.](https://people.mpi-inf.mpg.de/~mehlhorn/LEDAbook.html) Cambridge University Press. Chapter 7, Graph Algorithms; p. 132-162 (1999)
* Fredman, M., Tarjan, R. ["Fibonacci heaps and their uses in improved network optimization algorithms."](https://www.cl.cam.ac.uk/teaching/1011/AlgorithII/1987-FredmanTar-fibonacci.pdf) J. ACM, 34(3):596–615 (1987)
* Bertsekas, D., Castañon, D. ["A forward/reverse auction algorithm for asymmetric assignment problems."](https://www.mit.edu/~dimitrib/For_Rev_Asym_Auction.pdf) Computational Optimization and Applications 1, 277-297 (1992)
* Dimitrios Michail, Joris Kinable, Barak Naveh, and John V. Sichi. ["JGraphT—A Java Library for Graph Data Structures and Algorithms."](https://dl.acm.org/doi/10.1145/3381449) ACM Trans. Math. Softw. 46, 2, Article 16 (2020)
## EXTERNAL LIBRARIES USED
* [JGraphT](https://jgrapht.org) -- Graph theory data structures and algorithms
@@ -649,8 +616,75 @@ the file of distinct cells may enable better simulated replication of this exper
* [Apache Commons CLI](https://commons.apache.org/proper/commons-cli/) -- To enable command line arguments for scripting.
## ACKNOWLEDGEMENTS
BiGpairSEQ was conceived in collaboration with Dr. Alice MacQueen, who brought the original
BiGpairSEQ was conceived in collaboration with the author's spouse, Dr. Alice MacQueen, who brought the original
pairSEQ paper to the author's attention and explained all the biology terms he didn't know.
## AUTHOR
BiGpairSEQ algorithm and simulation by Eugene Fischer, 2021. Improvements and documentation, 2022.
BiGpairSEQ algorithm and simulation by Eugene Fischer, 2021. Improvements and documentation, 2022–2025.
## DISCLOSURE
The earliest versions of the BiGpairSEQ simulator were written in 2021 to let Dr. MacQueen test hypothetical extensions
of the published pairSEQ protocol while she was interviewing for a position at Adaptive Biotechnologies. She was
employed at Adaptive Biotechnologies starting in 2022.
The author has worked on this BiGpairSEQ simulator since 2021 without Dr. MacQueen's involvement, since she has had
access to related, proprietary technologies. The author has had no such access, relying exclusively on the 2015 pairSEQ
paper and other academic publications. He continues to work on the BiGpairSEQ simulator recreationally, as it has been
a means of exploring some very beautiful math.
## TODO
* Consider whether a graph database might be a better option than keeping things in memory.
* Look at fastUtil for more performant maps and arrays. Note that there is an optional jGraphT library to work with fastUtil (see FastutilMapIntVertexGraph, for example).
* Consider implementing an option to use the jGrapht sparse graph representation for a lower memory cost with very large graphs (tens or hundreds of thousands of distinct sequences).
* ~~Update CLI option text in this readme to include Zipf distribution options~~
* ~~Try invoking GC at end of workloads to reduce paging to disk~~ DONE
* ~~Hold graph data in memory until another graph is read-in? ABANDONED, later UN-ABANDONED~~ DONE
* ~~*No, this won't work, because BiGpairSEQ simulations alter the underlying graph based on filtering constraints. Changes would cascade with multiple experiments.*~~
* Might have figured out a way to do it, by taking edges out and then putting them back into the graph. This may actually be possible.
* It is possible, though the modifications to the graph incur their own performance penalties. Need testing to see which option is best. It may be computer-specific.
* ~~Test whether pairing heap (currently used) or Fibonacci heap is more efficient for priority queue in current matching algorithm~~ DONE
* ~~in theory Fibonacci heap should be more efficient, but complexity overhead may eliminate theoretical advantage~~
* ~~Add controllable heap-type parameter?~~
* Parameter implemented. Pairing heap the current default.
* ~~Implement sample plates with random numbers of T cells per well.~~ DONE
* Possible BiGpairSEQ advantage over pairSEQ: BiGpairSEQ is resilient to variations in well population sizes on a sample plate; pairSEQ is not due to nature of probability calculations.
* preliminary data suggests that BiGpairSEQ behaves roughly as though the whole plate had whatever the *average* well concentration is, but that's still speculative.
* ~~See if there's a reasonable way to reformat Sample Plate files so that wells are columns instead of rows.~~
* ~~Problem is variable number of cells in a well~~
* ~~Apache Commons CSV library writes entries a row at a time~~
* Got this working, but at the cost of a profoundly strange bug in graph occupancy filtering. Have reverted the repo until I can figure out what caused that. Given how easily the tidyverse transposes CSV matrices in R, might not even be worth fixing.
* ~~Enable GraphML output in addition to serialized object binaries, for data portability~~ DONE
* ~~Have a branch where this is implemented, but there's a bug that broke matching. Don't currently have time to fix.~~
* ~~Re-implement command line arguments, to enable scripting and statistical simulation studies~~ DONE
* ~~Implement custom Vertex class to simplify code and make it easier to implement different MWM algorithms~~ DONE
* Advantage: would eliminate the need to use maps to associate vertices with sequences, which would make the code easier to understand.
* This also seems to be faster when using the same algorithm than the version with lots of maps, which is a nice bonus!
* ~~Implement simulation of read depth, and of read errors. Pre-filter graph for difference in read count to eliminate spurious sequences.~~ DONE
* Pre-filtering based on comparing (read depth) * (occupancy) to (read count) for each sequence works extremely well
* ~~Add read depth simulation options to CLI~~ DONE
* ~~Update graphml output to reflect current Vertex class attributes~~ DONE
* Individual well data from the SequenceRecords could be included, if there's ever a reason for it
* ~~Implement simulation of sequences being misread as other real sequence~~ DONE
* Implement redistributive heap for LEDA matching algorithm to achieve theoretical worst case of O(n(m + n log C)) where C is highest edge weight.
* Update matching metadata output options in CLI
* Add frequency distribution details to metadata output
* need to make an enum for the different distribution types and refactor the Plate class and user interfaces, also add the necessary fields to GraphWithMapData and then call if from Simulator
* Update performance data in this readme
* ~~Add section to ReadMe describing data filtering methods.~~ DONE, now part of algorithm description
* Re-implement CDR1 matching method
* ~~Refactor simulator code to collect all needed data in a single scan of the plate~~ DONE
* ~~Currently it scans once for the vertices and then again for the edge weights. This made simulating read depth awkward, and incompatible with caching of plate files.~~
* ~~This would be a fairly major rewrite of the simulator code, but could make things faster, and would definitely make them cleaner.~~
* Implement Duan and Su's maximum weight matching algorithm
* ~~Add controllable algorithm-type parameter?~~ DONE
* This would be fun and valuable, but probably take more time than I have for a hobby project.
* ~~Implement an auction algorithm for maximum weight matching~~ DONE
* Implement a forward/reverse auction algorithm for maximum weight matching
* Implement an algorithm for approximating a maximum weight matching
* Some of these run in linear or near-linear time
* given that the underlying biological samples have many, many sources of error, this would probably be the most useful option in practice. It seems less mathematically elegant, though, and so less fun for me.
* Implement Vose's alias method for arbitrary statistical distributions of cells
* Should probably refactor to use apache commons rng for this
* Use commons JCS for caching
* Parameterize pre-filtering options

View File

@@ -0,0 +1,5 @@
/**
 * The maximum weight matching (MWM) algorithms that can be selected at runtime.
 * Constant order is preserved for callers relying on {@code ordinal()} or serialized names.
 */
public enum AlgorithmType {
    /** The Hungarian algorithm. */
    HUNGARIAN,
    /** The forward auction algorithm. */
    AUCTION,
    /** The integer weight scaling algorithm of Duan and Su. */
    INTEGER_WEIGHT_SCALING
}

View File

@@ -13,7 +13,9 @@ public class BiGpairSEQ {
private static boolean cacheCells = false;
private static boolean cachePlate = false;
private static boolean cacheGraph = false;
private static HeapType priorityQueueHeapType = HeapType.FIBONACCI;
private static AlgorithmType matchingAlgorithmType = AlgorithmType.HUNGARIAN;
private static HeapType priorityQueueHeapType = HeapType.PAIRING;
private static DistributionType distributionType = DistributionType.ZIPF;
private static boolean outputBinary = true;
private static boolean outputGraphML = false;
private static boolean calculatePValue = false;
@@ -59,6 +61,10 @@ public class BiGpairSEQ {
return cellFilename;
}
public static DistributionType getDistributionType() {return distributionType;}
public static void setDistributionType(DistributionType type) {distributionType = type;}
public static Plate getPlateInMemory() {
return plateInMemory;
}
@@ -108,7 +114,6 @@ public class BiGpairSEQ {
return graphFilename;
}
public static boolean cacheCells() {
return cacheCells;
}
@@ -157,10 +162,18 @@ public class BiGpairSEQ {
BiGpairSEQ.cacheGraph = cacheGraph;
}
public static String getPriorityQueueHeapType() {
return priorityQueueHeapType.name();
public static HeapType getPriorityQueueHeapType() {
return priorityQueueHeapType;
}
public static AlgorithmType getMatchingAlgorithmType() { return matchingAlgorithmType; }
public static void setHungarianAlgorithm() { matchingAlgorithmType = AlgorithmType.HUNGARIAN; }
public static void setIntegerWeightScalingAlgorithm() { matchingAlgorithmType = AlgorithmType.INTEGER_WEIGHT_SCALING; }
public static void setAuctionAlgorithm() { matchingAlgorithmType = AlgorithmType.AUCTION; }
public static void setPairingHeap() {
priorityQueueHeapType = HeapType.PAIRING;
}

View File

@@ -123,16 +123,20 @@ public class CommandLineInterface {
Plate plate;
if (line.hasOption("poisson")) {
Double stdDev = Math.sqrt(numWells);
plate = new Plate(cells, cellFilename, numWells, populations, dropoutRate, stdDev, false);
plate = new Plate(cells, cellFilename, numWells, populations, dropoutRate, stdDev);
}
else if (line.hasOption("gaussian")) {
Double stdDev = Double.parseDouble(line.getOptionValue("stddev"));
plate = new Plate(cells, cellFilename, numWells, populations, dropoutRate, stdDev, false);
plate = new Plate(cells, cellFilename, numWells, populations, dropoutRate, stdDev);
}
else if (line.hasOption("zipf")) {
Double zipfExponent = Double.parseDouble(line.getOptionValue("exp"));
plate = new Plate(cells, cellFilename, numWells, populations, dropoutRate, zipfExponent);
}
else {
assert line.hasOption("exponential");
Double lambda = Double.parseDouble(line.getOptionValue("lambda"));
plate = new Plate(cells, cellFilename, numWells, populations, dropoutRate, lambda, true);
plate = new Plate(cells, cellFilename, numWells, populations, dropoutRate, lambda);
}
PlateFileWriter writer = new PlateFileWriter(outputFilename, plate);
writer.writePlateFile();
@@ -340,9 +344,13 @@ public class CommandLineInterface {
Option exponential = Option.builder("exponential")
.desc("Use an exponential distribution for cell sample")
.build();
Option zipf = Option.builder("zipf")
.desc("Use a Zipf distribution for cell sample")
.build();
distributions.addOption(poisson);
distributions.addOption(gaussian);
distributions.addOption(exponential);
distributions.addOption(zipf);
//options group for statistical distribution parameters
OptionGroup statParams = new OptionGroup();// add this to plate options
Option stdDev = Option.builder("stddev")
@@ -355,6 +363,11 @@ public class CommandLineInterface {
.hasArg()
.argName("value")
.build();
Option zipfExponent = Option.builder("exp")
.desc("If using -zipf flag, exponent value for distribution")
.hasArg()
.argName("value")
.build();
statParams.addOption(stdDev);
statParams.addOption(lambda);
//Option group for random plate or set populations
@@ -386,6 +399,7 @@ public class CommandLineInterface {
plateOptions.addOptionGroup(statParams);
plateOptions.addOptionGroup(wellPopOptions);
plateOptions.addOption(dropoutRate);
plateOptions.addOption(zipfExponent);
plateOptions.addOption(outputFileOption());
return plateOptions;
}

View File

@@ -0,0 +1,6 @@
/**
 * Statistical distributions available for populating simulated sample plates with cells.
 * Constant order is preserved for callers relying on {@code ordinal()} or serialized names.
 */
public enum DistributionType {
    /** Poisson distribution (approximated via a Gaussian with sqrt(n) standard deviation). */
    POISSON,
    /** Gaussian (normal) distribution. */
    GAUSSIAN,
    /** Exponential distribution. */
    EXPONENTIAL,
    /** Zipf distribution. */
    ZIPF
}

View File

@@ -1,72 +1,54 @@
import org.jgrapht.graph.DefaultWeightedEdge;
import org.jgrapht.graph.SimpleWeightedGraph;
import java.util.ArrayList;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import java.util.*;
public interface GraphModificationFunctions {
//remove over- and under-weight edges, return removed edges
static Map<Vertex[], Integer> filterByOverlapThresholds(SimpleWeightedGraph<Vertex, DefaultWeightedEdge> graph,
int low, int high, boolean saveEdges) {
Map<Vertex[], Integer> removedEdges = new HashMap<>();
static Map<DefaultWeightedEdge, Vertex[]> filterByOverlapThresholds(SimpleWeightedGraph<Vertex, DefaultWeightedEdge> graph,
int low, int high, boolean saveEdges) {
Map<DefaultWeightedEdge, Vertex[]> removedEdges = new HashMap<>();
Set<DefaultWeightedEdge> edgesToRemove = new HashSet<>();
for (DefaultWeightedEdge e : graph.edgeSet()) {
if ((graph.getEdgeWeight(e) > high) || (graph.getEdgeWeight(e) < low)) {
if(saveEdges) {
Vertex source = graph.getEdgeSource(e);
Vertex target = graph.getEdgeTarget(e);
Integer weight = (int) graph.getEdgeWeight(e);
Vertex[] edge = {source, target};
removedEdges.put(edge, weight);
}
else {
graph.setEdgeWeight(e, 0.0);
Vertex[] vertices = {graph.getEdgeSource(e), graph.getEdgeTarget(e)};
removedEdges.put(e, vertices);
}
edgesToRemove.add(e);
}
}
if(saveEdges) {
for (Vertex[] edge : removedEdges.keySet()) {
graph.removeEdge(edge[0], edge[1]);
}
}
edgesToRemove.forEach(graph::removeEdge);
return removedEdges;
}
//Remove edges for pairs with large occupancy discrepancy, return removed edges
static Map<Vertex[], Integer> filterByRelativeOccupancy(SimpleWeightedGraph<Vertex, DefaultWeightedEdge> graph,
static Map<DefaultWeightedEdge, Vertex[]> filterByRelativeOccupancy(SimpleWeightedGraph<Vertex, DefaultWeightedEdge> graph,
Integer maxOccupancyDifference, boolean saveEdges) {
Map<Vertex[], Integer> removedEdges = new HashMap<>();
Map<DefaultWeightedEdge, Vertex[]> removedEdges = new HashMap<>();
Set<DefaultWeightedEdge> edgesToRemove = new HashSet<>();
for (DefaultWeightedEdge e : graph.edgeSet()) {
Integer alphaOcc = graph.getEdgeSource(e).getOccupancy();
Integer betaOcc = graph.getEdgeTarget(e).getOccupancy();
if (Math.abs(alphaOcc - betaOcc) >= maxOccupancyDifference) {
if (saveEdges) {
Vertex source = graph.getEdgeSource(e);
Vertex target = graph.getEdgeTarget(e);
Integer weight = (int) graph.getEdgeWeight(e);
Vertex[] edge = {source, target};
removedEdges.put(edge, weight);
}
else {
graph.setEdgeWeight(e, 0.0);
Vertex[] vertices = {graph.getEdgeSource(e), graph.getEdgeTarget(e)};
removedEdges.put(e, vertices);
}
edgesToRemove.add(e);
}
}
if(saveEdges) {
for (Vertex[] edge : removedEdges.keySet()) {
graph.removeEdge(edge[0], edge[1]);
}
}
edgesToRemove.forEach(graph::removeEdge);
return removedEdges;
}
//Remove edges for pairs where overlap size is significantly lower than the well occupancy, return removed edges
static Map<Vertex[], Integer> filterByOverlapPercent(SimpleWeightedGraph<Vertex, DefaultWeightedEdge> graph,
static Map<DefaultWeightedEdge, Vertex[]> filterByOverlapPercent(SimpleWeightedGraph<Vertex, DefaultWeightedEdge> graph,
Integer minOverlapPercent,
boolean saveEdges) {
Map<Vertex[], Integer> removedEdges = new HashMap<>();
Map<DefaultWeightedEdge, Vertex[]> removedEdges = new HashMap<>();
Set<DefaultWeightedEdge> edgesToRemove = new HashSet<>();
for (DefaultWeightedEdge e : graph.edgeSet()) {
Integer alphaOcc = graph.getEdgeSource(e).getOccupancy();
Integer betaOcc = graph.getEdgeTarget(e).getOccupancy();
@@ -74,22 +56,13 @@ public interface GraphModificationFunctions {
double min = minOverlapPercent / 100.0;
if ((weight / alphaOcc < min) || (weight / betaOcc < min)) {
if (saveEdges) {
Vertex source = graph.getEdgeSource(e);
Vertex target = graph.getEdgeTarget(e);
Integer intWeight = (int) graph.getEdgeWeight(e);
Vertex[] edge = {source, target};
removedEdges.put(edge, intWeight);
}
else {
graph.setEdgeWeight(e, 0.0);
Vertex[] vertices = {graph.getEdgeSource(e), graph.getEdgeTarget(e)};
removedEdges.put(e, vertices);
}
edgesToRemove.add(e);
}
}
if(saveEdges) {
for (Vertex[] edge : removedEdges.keySet()) {
graph.removeEdge(edge[0], edge[1]);
}
}
edgesToRemove.forEach(graph::removeEdge);
return removedEdges;
}
@@ -126,10 +99,10 @@ public interface GraphModificationFunctions {
}
static void addRemovedEdges(SimpleWeightedGraph<Vertex, DefaultWeightedEdge> graph,
Map<Vertex[], Integer> removedEdges) {
for (Vertex[] edge : removedEdges.keySet()) {
DefaultWeightedEdge e = graph.addEdge(edge[0], edge[1]);
graph.setEdgeWeight(e, removedEdges.get(edge));
Map<DefaultWeightedEdge, Vertex[]> removedEdges) {
for (DefaultWeightedEdge edge : removedEdges.keySet()) {
Vertex[] vertices = removedEdges.get(edge);
graph.addEdge(vertices[0], vertices[1], edge);
}
}

View File

@@ -89,14 +89,12 @@ public class InteractiveInterface {
private static void makePlate() {
String cellFile = null;
String filename = null;
Double stdDev = 0.0;
Double parameter = 0.0;
Integer numWells = 0;
Integer numSections;
Integer[] populations = {1};
Double dropOutRate = 0.0;
boolean poisson = false;
boolean exponential = false;
double lambda = 1.5;
;
try {
System.out.println("\nSimulated sample plates consist of:");
System.out.println("* a number of wells");
@@ -114,33 +112,46 @@ public class InteractiveInterface {
System.out.println("1) Poisson");
System.out.println("2) Gaussian");
System.out.println("3) Exponential");
// System.out.println("(Note: approximate distribution in original paper is exponential, lambda = 0.6)");
// System.out.println("(lambda value approximated from slope of log-log graph in figure 4c)");
System.out.println("4) Zipf");
System.out.println("(Note: wider distributions are more memory intensive to match)");
System.out.print("Enter selection value: ");
input = sc.nextInt();
switch (input) {
case 1 -> poisson = true;
case 1 -> {
BiGpairSEQ.setDistributionType(DistributionType.POISSON);
}
case 2 -> {
BiGpairSEQ.setDistributionType(DistributionType.GAUSSIAN);
System.out.println("How many distinct T-cells within one standard deviation of peak frequency?");
System.out.println("(Note: wider distributions are more memory intensive to match)");
stdDev = sc.nextDouble();
if (stdDev <= 0.0) {
parameter = sc.nextDouble();
if (parameter <= 0.0) {
throw new InputMismatchException("Value must be positive.");
}
}
case 3 -> {
exponential = true;
BiGpairSEQ.setDistributionType(DistributionType.EXPONENTIAL);
System.out.print("Please enter lambda value for exponential distribution: ");
lambda = sc.nextDouble();
if (lambda <= 0.0) {
lambda = 0.6;
System.out.println("Value must be positive. Defaulting to 0.6.");
parameter = sc.nextDouble();
if (parameter <= 0.0) {
parameter = 1.4;
System.out.println("Value must be positive. Defaulting to 1.4.");
}
}
case 4 -> {
BiGpairSEQ.setDistributionType(DistributionType.ZIPF);
System.out.print("Please enter exponent value for Zipf distribution: ");
parameter = sc.nextDouble();
if (parameter <= 0.0) {
parameter = 1.4;
System.out.println("Value must be positive. Defaulting to 1.4.");
}
}
default -> {
System.out.println("Invalid input. Defaulting to exponential.");
exponential = true;
parameter = 1.4;
BiGpairSEQ.setDistributionType(DistributionType.EXPONENTIAL);
}
}
System.out.print("\nNumber of wells on plate: ");
@@ -226,16 +237,17 @@ public class InteractiveInterface {
assert filename != null;
Plate samplePlate;
PlateFileWriter writer;
if(exponential){
samplePlate = new Plate(cells, cellFile, numWells, populations, dropOutRate, lambda, true);
writer = new PlateFileWriter(filename, samplePlate);
}
else {
if (poisson) {
stdDev = Math.sqrt(cells.getCellCount()); //gaussian with square root of elements approximates poisson
DistributionType type = BiGpairSEQ.getDistributionType();
switch(type) {
case POISSON -> {
parameter = Math.sqrt(cells.getCellCount()); //gaussian with square root of elements approximates poisson
samplePlate = new Plate(cells, cellFile, numWells, populations, dropOutRate, parameter);
writer = new PlateFileWriter(filename, samplePlate);
}
default -> {
samplePlate = new Plate(cells, cellFile, numWells, populations, dropOutRate, parameter);
writer = new PlateFileWriter(filename, samplePlate);
}
samplePlate = new Plate(cells, cellFile, numWells, populations, dropOutRate, stdDev, false);
writer = new PlateFileWriter(filename, samplePlate);
}
System.out.println("Writing Sample Plate to file");
writer.writePlateFile();
@@ -582,24 +594,37 @@ public class InteractiveInterface {
boolean backToOptions = false;
while(!backToOptions) {
System.out.println("\n---------ALGORITHM OPTIONS----------");
System.out.println("1) Use scaling algorithm by Duan and Su.");
System.out.println("2) Use LEDA book algorithm with Fibonacci heap priority queue");
System.out.println("3) Use LEDA book algorithm with pairing heap priority queue");
System.out.println("1) Use Hungarian algorithm with Fibonacci heap priority queue");
System.out.println("2) Use Hungarian algorithm with pairing heap priority queue");
System.out.println("3) Use auction algorithm");
System.out.println("4) Use integer weight scaling algorithm by Duan and Su. (buggy, not yet fully implemented!)");
System.out.println("0) Return to Options menu");
try {
input = sc.nextInt();
switch (input) {
case 1 -> System.out.println("This option is not yet implemented. Choose another.");
case 2 -> {
case 1 -> {
BiGpairSEQ.setHungarianAlgorithm();
BiGpairSEQ.setFibonacciHeap();
System.out.println("MWM algorithm set to LEDA with Fibonacci heap");
System.out.println("MWM algorithm set to Hungarian with Fibonacci heap");
backToOptions = true;
}
case 2 -> {
BiGpairSEQ.setHungarianAlgorithm();
BiGpairSEQ.setPairingHeap();
System.out.println("MWM algorithm set to Hungarian with pairing heap");
backToOptions = true;
}
case 3 -> {
BiGpairSEQ.setPairingHeap();
System.out.println("MWM algorithm set to LEDA with pairing heap");
BiGpairSEQ.setAuctionAlgorithm();
System.out.println("MWM algorithm set to auction");
backToOptions = true;
}
case 4 -> {
System.out.println("Scaling integer weight MWM algorithm not yet fully implemented. Sorry.");
// BiGpairSEQ.setIntegerWeightScalingAlgorithm();
// System.out.println("MWM algorithm set to integer weight scaling algorithm of Duan and Su");
// backToOptions = true;
}
case 0 -> backToOptions = true;
default -> System.out.println("Invalid input");
}

View File

@@ -0,0 +1,177 @@
import org.jgrapht.Graph;
import org.jgrapht.GraphTests;
import org.jgrapht.alg.interfaces.MatchingAlgorithm;
import java.math.BigDecimal;
import java.util.*;
/**
 * Maximum weight matching in bipartite graphs with strictly integer edge weights, using a forward auction algorithm.
 * This implementation uses the Gauss-Seidel version of the forward auction algorithm, in which bids are submitted
 * one at a time. For any weighted bipartite graph with n vertices in the smaller partition, this algorithm will produce
 * a matching that is within n*epsilon of being optimal. Using an epsilon = 1/(n+1) ensures that this matching differs
 * from an optimal matching by &lt;1. Thus, for a bipartite graph with strictly integer weights, this algorithm returns
 * a maximum weight matching.
 *
 * See:
 * "Towards auction algorithms for large dense assignment problems"
 * Libor Buš and Pavel Tvrdík, Comput Optim Appl (2009) 43:411-436
 * https://link.springer.com/article/10.1007/s10589-007-9146-5
 *
 * See also:
 * Many books and papers by Dimitri Bertsekas, including chapter 4 of Linear Network Optimization:
 * https://web.mit.edu/dimitrib/www/LNets_Full_Book.pdf
 *
 * @param <V> the graph vertex type
 * @param <E> the graph edge type
 *
 * @author Eugene Fischer
 */
public class MaximumIntegerWeightBipartiteAuctionMatching<V, E> implements MatchingAlgorithm<V, E> {

    /** Sentinel smaller than any admissible value; real values are >= 0 because item prices start at 0. */
    private static final BigDecimal NEGATIVE_ONE = BigDecimal.valueOf(-1);

    private final Graph<V, E> graph;
    private final Set<V> partition1;
    private final Set<V> partition2;
    /** Minimum bid increment. 1/(n+1) guarantees exactness for integer weights (see class doc). */
    private final BigDecimal epsilon;
    private final Set<E> matching;
    private BigDecimal matchingWeight;
    /** True when partition2 is the bidder side, which reverses edge source/target roles in getItem(). */
    private boolean swappedPartitions = false;

    /**
     * @param graph an undirected, simple, weighted bipartite graph with integer edge weights
     * @param partition1 one side of the bipartition (must not be null)
     * @param partition2 the other side of the bipartition (must not be null)
     */
    public MaximumIntegerWeightBipartiteAuctionMatching(Graph<V, E> graph, Set<V> partition1, Set<V> partition2) {
        this.graph = GraphTests.requireUndirected(graph);
        this.partition1 = Objects.requireNonNull(partition1, "Partition 1 cannot be null");
        this.partition2 = Objects.requireNonNull(partition2, "Partition 2 cannot be null");
        int n = Math.max(partition1.size(), partition2.size());
        this.epsilon = BigDecimal.valueOf(1 / ((double) n + 1)); //The minimum price increase of a bid
        this.matching = new LinkedHashSet<>();
        this.matchingWeight = BigDecimal.ZERO;
    }

    /*
    Method coded using MaximumWeightBipartiteMatching.class from JgraphT as a model
    */
    @Override
    public Matching<V, E> getMatching() {
        // Validate the input instance.
        if (!GraphTests.isSimple(graph)) {
            throw new IllegalArgumentException("Only simple graphs supported");
        }
        if (!GraphTests.isBipartitePartition(graph, partition1, partition2)) {
            throw new IllegalArgumentException("Graph partition is not bipartite");
        }
        // If the two partitions are different sizes, the bidders must be the smaller of the two partitions.
        Set<V> items;
        Set<V> bidders;
        if (partition2.size() >= partition1.size()) {
            bidders = partition1;
            items = partition2;
        } else {
            bidders = partition2;
            items = partition1;
            swappedPartitions = true;
        }
        /*
        Track the owner of each item (initially null) and the price of each item (initially 0).
        An initial price of 0 allows for asymmetric assignment (though it does mean that this form
        of the algorithm cannot take advantage of epsilon-scaling).
        */
        Map<V, V> owners = new HashMap<>();
        Map<V, BigDecimal> prices = new HashMap<>();
        for (V item : items) {
            owners.put(item, null);
            prices.put(item, BigDecimal.ZERO);
        }
        // Queue of bidders that don't currently own an item -- initially all of them.
        Queue<V> unmatchedBidders = new ArrayDeque<>(bidders);
        // Run the auction while there are remaining unmatched bidders.
        while (!unmatchedBidders.isEmpty()) {
            V bidder = unmatchedBidders.poll();
            V bestItem = null;
            BigDecimal bestValue = NEGATIVE_ONE;
            BigDecimal runnerUpValue = NEGATIVE_ONE;
            /*
            Find the items that offer the best and second-best value (weight minus price) for the bidder,
            then submit a bid equal to the price of the best-valued item plus the marginal value over
            the second-best-valued item plus epsilon.
            */
            for (E edge : graph.edgesOf(bidder)) {
                double weight = graph.getEdgeWeight(edge);
                if (weight == 0.0) {
                    continue; // a zero-weight edge can never contribute to a maximum weight matching
                }
                V candidate = getItem(edge);
                BigDecimal value = BigDecimal.valueOf(weight).subtract(prices.get(candidate));
                if (value.compareTo(bestValue) >= 0) {
                    runnerUpValue = bestValue;
                    bestValue = value;
                    bestItem = candidate;
                } else if (value.compareTo(runnerUpValue) >= 0) {
                    runnerUpValue = value;
                }
            }
            // A bidder with no non-negatively-valued item drops out permanently (asymmetric assignment).
            if (bestValue.compareTo(BigDecimal.ZERO) >= 0) {
                V formerOwner = owners.get(bestItem);
                BigDecimal price = prices.get(bestItem);
                BigDecimal bid = price.add(bestValue).subtract(runnerUpValue).add(epsilon);
                if (formerOwner != null) {
                    unmatchedBidders.offer(formerOwner); // the outbid owner re-enters the auction
                }
                owners.put(bestItem, bidder);
                prices.put(bestItem, bid);
            }
        }
        // Add all edges between items and their owners to the matching.
        for (V item : owners.keySet()) {
            if (owners.get(item) != null) {
                matching.add(graph.getEdge(item, owners.get(item)));
            }
        }
        // Sum the edges of the matching to obtain the matching weight.
        for (E edge : matching) {
            this.matchingWeight = this.matchingWeight.add(BigDecimal.valueOf(graph.getEdgeWeight(edge)));
        }
        return new MatchingImpl<>(graph, matching, matchingWeight.doubleValue());
    }

    /** Returns the item endpoint of an edge, accounting for whether the partitions were swapped. */
    private V getItem(E edge) {
        if (swappedPartitions) {
            return graph.getEdgeSource(edge);
        } else {
            return graph.getEdgeTarget(edge);
        }
    }

    /** @return the total weight of the matching computed by {@link #getMatching()} (zero before it is called) */
    public BigDecimal getMatchingWeight() {
        return matchingWeight;
    }
}

File diff suppressed because it is too large Load Diff

View File

@@ -0,0 +1,212 @@
import org.jgrapht.Graph;
import org.jgrapht.GraphTests;
import org.jgrapht.alg.interfaces.MatchingAlgorithm;
import org.jgrapht.alg.util.Pair;
import java.math.BigDecimal;
import java.util.*;
/*
Maximum weight matching in bipartite graphs with strictly integer edge weights, found using the
unscaled look-back auction algorithm
*/
public class MaximumWeightBipartiteLookBackAuctionMatching<V, E> implements MatchingAlgorithm<V, E> {
private final Graph<V, E> graph;
private final Set<V> partition1;
private final Set<V> partition2;
private final BigDecimal delta;
private final Set<E> matching;
private BigDecimal matchingWeight;
private boolean swappedPartitions = false;
public MaximumWeightBipartiteLookBackAuctionMatching(Graph<V, E> graph, Set<V> partition1, Set<V> partition2) {
this.graph = GraphTests.requireUndirected(graph);
this.partition1 = Objects.requireNonNull(partition1, "Partition 1 cannot be null");
this.partition2 = Objects.requireNonNull(partition2, "Partition 2 cannot be null");
int n = Math.max(partition1.size(), partition2.size());
this.delta = BigDecimal.valueOf(1 / ((double) n + 1));
this.matching = new LinkedHashSet<>();
this.matchingWeight = BigDecimal.ZERO;
}
/*
Method coded using MaximumWeightBipartiteMatching.class from JgraphT as a model
*/
@Override
public Matching<V, E> getMatching() {
/*
* Test input instance
*/
if (!GraphTests.isSimple(graph)) {
throw new IllegalArgumentException("Only simple graphs supported");
}
if (!GraphTests.isBipartitePartition(graph, partition1, partition2)) {
throw new IllegalArgumentException("Graph partition is not bipartite");
}
/*
If the two partitions are different sizes, the bidders must be the smaller of the two partitions.
*/
Set<V> items;
Set<V> bidders;
if (partition2.size() >= partition1.size()) {
bidders = partition1;
items = partition2;
}
else {
bidders = partition2;
items = partition1;
swappedPartitions = true;
}
/*
Create a map to track the owner of each item, which is initially null,
and a map to track the price of each item, which is initially 0.
*/
Map<V, V> owners = new HashMap<>();
/*
Create a map to track the prices of the objects
*/
Map<V, BigDecimal> prices = new HashMap<>();
for(V item: items) {
owners.put(item, null);
prices.put(item, BigDecimal.ZERO);
}
/*
Create a map to track the most valuable object for a bidder
*/
Map<V, V> mostValuableItems = new HashMap<>();
/*
Create a map to track the second most valuable object for a bidder
*/
Map<V, V> runnerUpItems = new HashMap<>();
/*
Create a map to track the bidder value thresholds
*/
Map<V, BigDecimal> valueThresholds = new HashMap<>();
//Initialize queue of all bidders that don't currently own an item
Queue<V> unmatchedBidders = new ArrayDeque<>();
for(V bidder: bidders) {
unmatchedBidders.offer(bidder);
valueThresholds.put(bidder, BigDecimal.ZERO);
mostValuableItems.put(bidder, null);
runnerUpItems.put(bidder, null);
}
while (unmatchedBidders.size() > 0) {
V bidder = unmatchedBidders.poll();
// BigDecimal valueThreshold = valueThresholds.get(bidder);
BigDecimal bestValue = BigDecimal.ZERO;
BigDecimal runnerUpValue = BigDecimal.ZERO;
boolean reinitialize = true;
// if (mostValuableItems.get(bidder) != null && runnerUpItems.get(bidder) != null) {
// reinitialize = false;
// //get the weight of the edge between the bidder and the best valued item
// V bestItem = mostValuableItems.get(bidder);
// BigDecimal bestItemWeight = BigDecimal.valueOf(graph.getEdgeWeight(graph.getEdge(bidder, bestItem)));
// bestValue = bestItemWeight.subtract(prices.get(bestItem));
// V runnerUpItem = runnerUpItems.get(bidder);
// BigDecimal runnerUpWeight = BigDecimal.valueOf(graph.getEdgeWeight(graph.getEdge(bidder, runnerUpItem)));
// runnerUpValue = runnerUpWeight.subtract(prices.get(runnerUpItem));
// //if both values are still above the threshold
// if (bestValue.compareTo(valueThreshold) >= 0 && runnerUpValue.compareTo(valueThreshold) >= 0) {
// if (bestValue.compareTo(runnerUpValue) < 0) { //if best value is lower than runner up
// BigDecimal tmp = bestValue;
// bestValue = runnerUpValue;
// runnerUpValue = tmp;
// mostValuableItems.put(bidder, runnerUpItem);
// runnerUpItems.put(bidder, bestItem);
// }
// BigDecimal newValueThreshold = bestValue.min(runnerUpValue);
// valueThresholds.put(bidder, newValueThreshold);
// System.out.println("lookback successful");
// }
// else {
// reinitialize = true; //lookback failed
// }
// }
if (reinitialize){
bestValue = BigDecimal.ZERO;
runnerUpValue = BigDecimal.ZERO;
for (E edge: graph.edgesOf(bidder)) {
double weight = graph.getEdgeWeight(edge);
if (weight == 0.0) {
continue;
}
V tmpItem = getItem(bidder, edge);
BigDecimal tmpValue = BigDecimal.valueOf(weight).subtract(prices.get(tmpItem));
if (tmpValue.compareTo(bestValue) >= 0) {
runnerUpValue = bestValue;
bestValue = tmpValue;
runnerUpItems.put(bidder, mostValuableItems.get(bidder));
mostValuableItems.put(bidder, tmpItem);
}
else if (tmpValue.compareTo(runnerUpValue) >= 0) {
runnerUpValue = tmpValue;
runnerUpItems.put(bidder, tmpItem);
}
}
valueThresholds.put(bidder, runnerUpValue);
}
//Should now have initialized the maps to make look back possible
//skip this bidder if the best value is still zero
if (BigDecimal.ZERO.equals(bestValue)) {
continue;
}
V mostValuableItem = mostValuableItems.get(bidder);
BigDecimal price = prices.get(mostValuableItem);
BigDecimal bid = price.add(bestValue).subtract(runnerUpValue).add(this.delta);
V formerOwner = owners.get(mostValuableItem);
if (formerOwner != null) {
unmatchedBidders.offer(formerOwner);
}
owners.put(mostValuableItem, bidder);
prices.put(mostValuableItem, bid);
}
for (V item: owners.keySet()) {
if (owners.get(item) != null) {
matching.add(graph.getEdge(item, owners.get(item)));
}
}
for(E edge: matching) {
this.matchingWeight = this.matchingWeight.add(BigDecimal.valueOf(graph.getEdgeWeight(edge)));
}
return new MatchingImpl<>(graph, matching, matchingWeight.doubleValue());
}
/**
 * Returns the item endpoint of an edge incident to {@code bidder}.
 * Which endpoint of the edge is the item depends on whether the two
 * bipartite partitions were swapped at construction time.
 *
 * @param bidder the bidder endpoint of the edge (unused except for clarity at call sites)
 * @param edge   an edge incident to the bidder
 * @return the vertex on the item side of the edge
 */
private V getItem(V bidder, E edge) {
    return swappedPartitions ? graph.getEdgeSource(edge) : graph.getEdgeTarget(edge);
}
/**
 * Returns the bidder endpoint of an edge incident to {@code item}.
 * Mirror of {@code getItem}: when the partitions were swapped, the
 * bidder sits on the target side of the edge, otherwise on the source side.
 *
 * @param item the item endpoint of the edge (unused except for clarity at call sites)
 * @param edge an edge incident to the item
 * @return the vertex on the bidder side of the edge
 */
private V getBidder(V item, E edge) {
    return swappedPartitions ? graph.getEdgeTarget(edge) : graph.getEdgeSource(edge);
}
/**
 * @return the total weight of the computed matching, accumulated in
 *         exact {@link java.math.BigDecimal} arithmetic
 */
public BigDecimal getMatchingWeight() {
    return this.matchingWeight;
}
}

View File

@@ -13,6 +13,10 @@ TODO: Implement discrete frequency distributions using Vose's Alias Method
*/
import org.apache.commons.rng.sampling.distribution.RejectionInversionZipfSampler;
import org.apache.commons.rng.simple.JDKRandomWrapper;
import java.util.*;
public class Plate {
@@ -26,25 +30,22 @@ public class Plate {
private Integer[] populations;
private double stdDev;
private double lambda;
boolean exponential = false;
private double zipfExponent;
private DistributionType distributionType;
public Plate(CellSample cells, String cellFilename, int numWells, Integer[] populations,
double dropoutRate, double stdDev_or_lambda, boolean exponential){
double dropoutRate, double parameter){
this.cells = cells;
this.sourceFile = cellFilename;
this.size = numWells;
this.wells = new ArrayList<>();
this.error = dropoutRate;
this.populations = populations;
this.exponential = exponential;
if (this.exponential) {
this.lambda = stdDev_or_lambda;
fillWellsExponential(cells.getCells(), this.lambda);
}
else {
this.stdDev = stdDev_or_lambda;
fillWells(cells.getCells(), this.stdDev);
}
this.stdDev = parameter;
this.lambda = parameter;
this.zipfExponent = parameter;
this.distributionType = BiGpairSEQ.getDistributionType();
fillWells(cells.getCells());
}
@@ -61,21 +62,57 @@ public class Plate {
this.wells = wells;
this.size = wells.size();
double totalCellCount = 0.0;
double totalDropoutCount = 0.0;
List<Integer> concentrations = new ArrayList<>();
for (List<String[]> w: wells) {
if(!concentrations.contains(w.size())){
concentrations.add(w.size());
}
for (String[] cell: w) {
totalCellCount += 1.0;
for (String sequence: cell) {
if("-1".equals(sequence)) {
totalDropoutCount += 1.0;
}
}
}
}
double totalSequenceCount = totalCellCount * 4;
this.error = totalDropoutCount / totalSequenceCount;
this.populations = new Integer[concentrations.size()];
for (int i = 0; i < this.populations.length; i++) {
this.populations[i] = concentrations.get(i);
}
}
/**
 * Fills the plate's wells by sampling cells from a Zipf distribution.
 * The plate is split into {@code populations.length} sections of equal
 * well count; each well in section {@code s} receives {@code populations[s]}
 * cells. Each sequence of a sampled cell is independently dropped
 * (replaced with "-1") with probability {@code error}.
 *
 * @param cells    the pool of cells to sample from
 * @param exponent the Zipf exponent (larger values skew sampling more
 *                 heavily toward low-index cells)
 */
private void fillWellsZipf(List<String[]> cells, double exponent) {
    int numSections = populations.length;
    RejectionInversionZipfSampler zipfSampler =
            new RejectionInversionZipfSampler(new JDKRandomWrapper(rand), cells.size(), exponent);
    for (int section = 0; section < numSections; section++) {
        for (int i = 0; i < (size / numSections); i++) {
            List<String[]> well = new ArrayList<>(populations[section]);
            for (int j = 0; j < populations[section]; j++) {
                // BUGFIX: the sampler returns ranks in [1, cells.size()] (1-based),
                // so convert to a 0-based index. The previous code used the rank
                // directly as an index, which made cells.get(0) unreachable and
                // forced the top rank to be rejected and resampled, biasing the
                // distribution.
                int n = zipfSampler.sample() - 1;
                // Defensive clamp guard; with the 1-based correction above the
                // sampler's range already fits [0, cells.size() - 1].
                if (n < 0 || n >= cells.size()) {
                    j--;
                    continue;
                }
                // Clone so per-well dropout does not mutate the shared source cell.
                String[] cellToAdd = cells.get(n).clone();
                for (int k = 0; k < cellToAdd.length; k++) {
                    // nextDouble() is already in [0, 1); Math.abs was redundant.
                    if (rand.nextDouble() < error) { // error applied to each sequence
                        cellToAdd[k] = "-1";
                    }
                }
                well.add(cellToAdd);
            }
            wells.add(well);
        }
    }
}
private void fillWellsExponential(List<String[]> cells, double lambda){
this.lambda = lambda;
exponential = true;
int numSections = populations.length;
int section = 0;
double m;
@@ -131,6 +168,24 @@ public class Plate {
}
}
/**
 * Dispatches well filling to the sampler matching the globally configured
 * cell frequency distribution.
 *
 * @param cells the source cells to distribute across the plate's wells
 */
private void fillWells(List<String[]> cells){
    DistributionType type = BiGpairSEQ.getDistributionType();
    // Arrow-label switch cases never fall through, so the break statements
    // the original carried were redundant and have been removed.
    switch (type) {
        case POISSON, GAUSSIAN -> fillWells(cells, getStdDev());
        case EXPONENTIAL -> fillWellsExponential(cells, getLambda());
        case ZIPF -> fillWellsZipf(cells, getZipfExponent());
    }
}
/**
 * @return the per-section well population counts.
 * NOTE(review): this returns the internal array without copying, so callers
 * can mutate plate state — consider returning a clone; left as-is to
 * preserve existing behavior.
 */
public Integer[] getPopulations(){
return populations;
}
@@ -143,10 +198,12 @@ public class Plate {
return stdDev;
}
public boolean isExponential(){return exponential;}
/** @return the cell frequency distribution type captured from BiGpairSEQ when this plate was constructed */
public DistributionType getDistributionType() { return distributionType;}
/** @return the exponential-distribution rate parameter (meaningful only when the distribution type is EXPONENTIAL) */
public double getLambda(){return lambda;}
/** @return the Zipf exponent (meaningful only when the distribution type is ZIPF) */
public double getZipfExponent(){return zipfExponent;}
/** @return the sequence dropout (error) rate for this plate */
public double getError() {
    return this.error;
}
@@ -184,7 +241,7 @@ public class Plate {
sequencesAndMisreads.put(currentSequence, new ArrayList<>());
}
//The specific misread hasn't happened before
if (rand.nextDouble() >= errorCollisionRate || sequencesAndMisreads.get(currentSequence).size() == 0) {
if (rand.nextDouble() >= errorCollisionRate || sequencesAndMisreads.get(currentSequence).isEmpty()) {
//The misread doesn't collide with a real sequence already on the plate and some sequences have already been read
if(rand.nextDouble() >= realSequenceCollisionRate || !sequenceMap.isEmpty()){
StringBuilder spurious = new StringBuilder(currentSequence);

View File

@@ -13,11 +13,13 @@ public class PlateFileWriter {
private List<List<String[]>> wells;
private double stdDev;
private double lambda;
private double zipfExponent;
private DistributionType distributionType;
private Double error;
private String filename;
private String sourceFileName;
private Integer[] populations;
private boolean isExponential = false;
public PlateFileWriter(String filename, Plate plate) {
if(!filename.matches(".*\\.csv")){
@@ -26,12 +28,17 @@ public class PlateFileWriter {
this.filename = filename;
this.sourceFileName = plate.getSourceFileName();
this.size = plate.getSize();
this.isExponential = plate.isExponential();
if(isExponential) {
this.lambda = plate.getLambda();
}
else{
this.stdDev = plate.getStdDev();
this.distributionType = plate.getDistributionType();
switch(distributionType) {
case POISSON, GAUSSIAN -> {
this.stdDev = plate.getStdDev();
}
case EXPONENTIAL -> {
this.lambda = plate.getLambda();
}
case ZIPF -> {
this.zipfExponent = plate.getZipfExponent();
}
}
this.error = plate.getError();
this.wells = plate.getWells();
@@ -93,13 +100,24 @@ public class PlateFileWriter {
printer.printComment("Cell source file name: " + sourceFileName);
printer.printComment("Each row represents one well on the plate.");
printer.printComment("Plate size: " + size);
printer.printComment("Error rate: " + error);
printer.printComment("Well populations: " + wellPopulationsString);
if(isExponential){
printer.printComment("Lambda: " + lambda);
}
else {
printer.printComment("Std. dev.: " + stdDev);
printer.printComment("Error rate: " + error);
switch (distributionType) {
case POISSON -> {
printer.printComment("Cell frequency distribution: POISSON");
}
case GAUSSIAN -> {
printer.printComment("Cell frequency distribution: GAUSSIAN");
printer.printComment("--Standard deviation: " + stdDev);
}
case EXPONENTIAL -> {
printer.printComment("Cell frequency distribution: EXPONENTIAL");
printer.printComment("--Lambda: " + lambda);
}
case ZIPF -> {
printer.printComment("Cell frequency distribution: ZIPF");
printer.printComment("--Exponent: " + zipfExponent);
}
}
printer.printRecords(wellsAsStrings);
} catch(IOException ex){

View File

@@ -1,9 +1,8 @@
import org.jgrapht.Graphs;
import org.jgrapht.alg.interfaces.MatchingAlgorithm;
import org.jgrapht.alg.matching.MaximumWeightBipartiteMatching;
import org.jgrapht.generate.SimpleWeightedBipartiteGraphMatrixGenerator;
import org.jgrapht.graph.DefaultWeightedEdge;
import org.jgrapht.graph.SimpleWeightedGraph;
import org.jheaps.tree.FibonacciHeap;
import org.jheaps.tree.PairingHeap;
import java.math.BigDecimal;
@@ -70,58 +69,104 @@ public class Simulator implements GraphModificationFunctions {
if(verbose){System.out.println("Total beta sequence wells removed: " + betaWellsRemoved);}
}
//construct the graph. For simplicity, going to make
if(verbose){System.out.println("Making vertex maps");}
//For the SimpleWeightedBipartiteGraphMatrixGenerator, all vertices must have
//distinct numbers associated with them. Since I'm using a 2D array, that means
//distinct indices between the rows and columns. vertexStartValue lets me track where I switch
//from numbering rows to columns, so I can assign unique numbers to every vertex, and then
//subtract the vertexStartValue from betas to use their vertex labels as array indices
int vertexStartValue = 0;
//keys are sequential integer vertices, values are alphas
Map<String, Integer> plateAtoVMap = makeSequenceToVertexMap(alphaSequences, vertexStartValue);
//new start value for vertex to beta map should be one more than final vertex value in alpha map
vertexStartValue += plateAtoVMap.size();
//keys are betas, values are sequential integers
Map<String, Integer> plateBtoVMap = makeSequenceToVertexMap(betaSequences, vertexStartValue);
if(verbose){System.out.println("Vertex maps made");}
//make adjacency matrix for bipartite graph generator
//(technically this is only 1/4 of an adjacency matrix, but that's all you need
//for a bipartite graph, and all the SimpleWeightedBipartiteGraphMatrixGenerator class expects.)
if(verbose){System.out.println("Making adjacency matrix");}
double[][] weights = new double[plateAtoVMap.size()][plateBtoVMap.size()];
fillAdjacencyMatrix(weights, vertexStartValue, alphaSequences, betaSequences, plateAtoVMap, plateBtoVMap);
if(verbose){System.out.println("Adjacency matrix made");}
/*
* The commented out code below works beautifully for small enough graphs. However, after implementing a
* Zipf distribution and attempting to simulate Experiment 3 from the paper again, I discovered that
* this method uses too much memory. Even a 120GB heap is not enough to build this adjacency matrix.
* So I'm going to attempt to build this graph directly and see if that is less memory intensive
*/
// //construct the graph. For simplicity, going to make
// if(verbose){System.out.println("Making vertex maps");}
// //For the SimpleWeightedBipartiteGraphMatrixGenerator, all vertices must have
// //distinct numbers associated with them. Since I'm using a 2D array, that means
// //distinct indices between the rows and columns. vertexStartValue lets me track where I switch
// //from numbering rows to columns, so I can assign unique numbers to every vertex, and then
// //subtract the vertexStartValue from betas to use their vertex labels as array indices
// int vertexStartValue = 0;
// //keys are sequential integer vertices, values are alphas
// Map<String, Integer> plateAtoVMap = makeSequenceToVertexMap(alphaSequences, vertexStartValue);
// //new start value for vertex to beta map should be one more than final vertex value in alpha map
// vertexStartValue += plateAtoVMap.size();
// //keys are betas, values are sequential integers
// Map<String, Integer> plateBtoVMap = makeSequenceToVertexMap(betaSequences, vertexStartValue);
// if(verbose){System.out.println("Vertex maps made");}
// //make adjacency matrix for bipartite graph generator
// //(technically this is only 1/4 of an adjacency matrix, but that's all you need
// //for a bipartite graph, and all the SimpleWeightedBipartiteGraphMatrixGenerator class expects.)
// if(verbose){System.out.println("Making adjacency matrix");}
// double[][] weights = new double[plateAtoVMap.size()][plateBtoVMap.size()];
// fillAdjacencyMatrix(weights, vertexStartValue, alphaSequences, betaSequences, plateAtoVMap, plateBtoVMap);
// if(verbose){System.out.println("Adjacency matrix made");}
// //make bipartite graph
// if(verbose){System.out.println("Making bipartite weighted graph");}
// //the graph object
// SimpleWeightedGraph<Vertex, DefaultWeightedEdge> graph =
// new SimpleWeightedGraph<>(DefaultWeightedEdge.class);
// //the graph generator
// SimpleWeightedBipartiteGraphMatrixGenerator graphGenerator = new SimpleWeightedBipartiteGraphMatrixGenerator();
// //the list of alpha vertices
// List<Vertex> alphaVertices = new ArrayList<>();
// for (String seq : plateAtoVMap.keySet()) {
// Vertex alphaVertex = new Vertex(alphaSequences.get(seq), plateAtoVMap.get(seq));
// alphaVertices.add(alphaVertex);
// }
// //Sort to make sure the order of vertices in list matches the order of the adjacency matrix
// Collections.sort(alphaVertices);
// //Add ordered list of vertices to the graph
// graphGenerator.first(alphaVertices);
// //the list of beta vertices
// List<Vertex> betaVertices = new ArrayList<>();
// for (String seq : plateBtoVMap.keySet()) {
// Vertex betaVertex = new Vertex(betaSequences.get(seq), plateBtoVMap.get(seq));
// betaVertices.add(betaVertex);
// }
// //Sort to make sure the order of vertices in list matches the order of the adjacency matrix
// Collections.sort(betaVertices);
// //Add ordered list of vertices to the graph
// graphGenerator.second(betaVertices);
// //use adjacency matrix of weight created previously
// graphGenerator.weights(weights);
// graphGenerator.generateGraph(graph);
//make bipartite graph
if(verbose){System.out.println("Making bipartite weighted graph");}
//the graph object
SimpleWeightedGraph<Vertex, DefaultWeightedEdge> graph =
new SimpleWeightedGraph<>(DefaultWeightedEdge.class);
//the graph generator
SimpleWeightedBipartiteGraphMatrixGenerator graphGenerator = new SimpleWeightedBipartiteGraphMatrixGenerator();
//the list of alpha vertices
int vertexLabelValue = 0;
//create and add alpha sequence vertices
List<Vertex> alphaVertices = new ArrayList<>();
for (String seq : plateAtoVMap.keySet()) {
Vertex alphaVertex = new Vertex(alphaSequences.get(seq), plateAtoVMap.get(seq));
alphaVertices.add(alphaVertex);
for (Map.Entry<String, SequenceRecord> entry: alphaSequences.entrySet()) {
alphaVertices.add(new Vertex(entry.getValue(), vertexLabelValue));
vertexLabelValue++;
}
//Sort to make sure the order of vertices in list matches the order of the adjacency matrix
Collections.sort(alphaVertices);
//Add ordered list of vertices to the graph
graphGenerator.first(alphaVertices);
//the list of beta vertices
alphaVertices.forEach(graph::addVertex);
//add beta sequence vertices
List<Vertex> betaVertices = new ArrayList<>();
for (String seq : plateBtoVMap.keySet()) {
Vertex betaVertex = new Vertex(betaSequences.get(seq), plateBtoVMap.get(seq));
betaVertices.add(betaVertex);
for (Map.Entry<String, SequenceRecord> entry: betaSequences.entrySet()) {
betaVertices.add(new Vertex(entry.getValue(), vertexLabelValue));
vertexLabelValue++;
}
betaVertices.forEach(graph::addVertex);
//add edges (best so far)
int edgesAddedCount = 0;
for(Vertex a: alphaVertices) {
Set<Integer> a_wells = a.getRecord().getWells();
for(Vertex b: betaVertices) {
Set<Integer> sharedWells = new HashSet<>(a_wells);
sharedWells.retainAll(b.getRecord().getWells());
if (!sharedWells.isEmpty()) {
Graphs.addEdge(graph, a, b, (double) sharedWells.size());
}
edgesAddedCount++;
if (edgesAddedCount % 10000000 == 0) { //collect garbage every 10,000,000 edges
System.out.println(edgesAddedCount + " edges added");
//request garbage collection
System.gc();
System.out.println("Garbage collection requested");
}
}
}
//Sort to make sure the order of vertices in list matches the order of the adjacency matrix
Collections.sort(betaVertices);
//Add ordered list of vertices to the graph
graphGenerator.second(betaVertices);
//use adjacency matrix of weight created previously
graphGenerator.weights(weights);
graphGenerator.generateGraph(graph);
if(verbose){System.out.println("Graph created");}
//stop timing
Instant stop = Instant.now();
@@ -145,7 +190,7 @@ public class Simulator implements GraphModificationFunctions {
Integer minOverlapPercent, boolean verbose, boolean calculatePValue) {
Instant start = Instant.now();
SimpleWeightedGraph<Vertex, DefaultWeightedEdge> graph = data.getGraph();
Map<Vertex[], Integer> removedEdges = new HashMap<>();
Map<DefaultWeightedEdge, Vertex[]> removedEdges = new HashMap<>();
boolean saveEdges = BiGpairSEQ.cacheGraph();
int numWells = data.getNumWells();
//Integer alphaCount = data.getAlphaCount();
@@ -163,6 +208,7 @@ public class Simulator implements GraphModificationFunctions {
}
Integer graphAlphaCount = alphas.size();
Integer graphBetaCount = betas.size();
Integer graphEdgeCount = graph.edgeSet().size();
//remove edges with weights outside given overlap thresholds, add those to removed edge list
if(verbose){System.out.println("Eliminating edges with weights outside overlap threshold values");}
@@ -182,33 +228,39 @@ public class Simulator implements GraphModificationFunctions {
if(verbose){System.out.println("Edges between vertices of with excessively different occupancy values " +
"removed");}
Integer filteredGraphEdgeCount = graph.edgeSet().size();
//Find Maximum Weight Matching
//using jheaps library class PairingHeap for improved efficiency
if(verbose){System.out.println("Finding maximum weight matching");}
MaximumWeightBipartiteMatching maxWeightMatching;
//Use correct heap type for priority queue
String heapType = BiGpairSEQ.getPriorityQueueHeapType();
switch (heapType) {
case "PAIRING" -> {
maxWeightMatching = new MaximumWeightBipartiteMatching(graph,
alphas,
betas,
i -> new PairingHeap(Comparator.naturalOrder()));
//The matching object
MatchingAlgorithm<Vertex, DefaultWeightedEdge> maxWeightMatching;
//Determine algorithm type
AlgorithmType algorithm = BiGpairSEQ.getMatchingAlgorithmType();
switch (algorithm) { //Only two options now, but I have room to add more algorithms in the future this way
case AUCTION -> {
//create a new MaximumIntegerWeightBipartiteAuctionMatching
maxWeightMatching = new MaximumIntegerWeightBipartiteAuctionMatching<>(graph, alphas, betas);
}
case "FIBONACCI" -> {
maxWeightMatching = new MaximumWeightBipartiteMatching(graph,
alphas,
betas,
i -> new FibonacciHeap(Comparator.naturalOrder()));
case INTEGER_WEIGHT_SCALING -> {
maxWeightMatching = new MaximumIntegerWeightBipartiteMatching<>(graph, alphas, betas, new BigDecimal(highThreshold));
}
default -> {
maxWeightMatching = new MaximumWeightBipartiteMatching(graph,
alphas,
betas);
default -> { //HUNGARIAN
//use selected heap type for priority queue
HeapType heap = BiGpairSEQ.getPriorityQueueHeapType();
if(HeapType.PAIRING.equals(heap)) {
maxWeightMatching = new MaximumWeightBipartiteMatching<Vertex, DefaultWeightedEdge>(graph,
alphas,
betas,
i -> new PairingHeap(Comparator.naturalOrder()));
}
else {//Fibonacci is the default, and what's used in the JGraphT implementation
maxWeightMatching = new MaximumWeightBipartiteMatching<Vertex, DefaultWeightedEdge>(graph,
alphas,
betas);
}
}
}
//get the matching
MatchingAlgorithm.Matching<String, DefaultWeightedEdge> graphMatching = maxWeightMatching.getMatching();
MatchingAlgorithm.Matching<Vertex, DefaultWeightedEdge> matching = maxWeightMatching.getMatching();
if(verbose){System.out.println("Matching completed");}
Instant stop = Instant.now();
@@ -226,7 +278,7 @@ public class Simulator implements GraphModificationFunctions {
List<List<String>> allResults = new ArrayList<>();
NumberFormat nf = NumberFormat.getInstance(Locale.US);
MathContext mc = new MathContext(3);
Iterator<DefaultWeightedEdge> weightIter = graphMatching.iterator();
Iterator<DefaultWeightedEdge> weightIter = matching.iterator();
DefaultWeightedEdge e;
int trueCount = 0;
int falseCount = 0;
@@ -267,10 +319,22 @@ public class Simulator implements GraphModificationFunctions {
}
//Metadata comments for CSV file
String algoType = "LEDA book with heap: " + heapType;
String algoType;
switch(algorithm) {
case AUCTION -> {
algoType = "Auction algorithm";
}
case INTEGER_WEIGHT_SCALING -> {
algoType = "Integer weight scaling algorithm from Duan and Su (not yet perfectly implemented)";
}
default -> { //HUNGARIAN
algoType = "Hungarian algorithm with heap: " + BiGpairSEQ.getPriorityQueueHeapType().name();
}
}
int min = Math.min(graphAlphaCount, graphBetaCount);
//matching weight
BigDecimal totalMatchingWeight = maxWeightMatching.getMatchingWeight();
Double matchingWeight = matching.getWeight();
//rate of attempted matching
double attemptRate = (double) (trueCount + falseCount) / min;
BigDecimal attemptRateTrunc = new BigDecimal(attemptRate, mc);
@@ -309,7 +373,7 @@ public class Simulator implements GraphModificationFunctions {
metadata.put("sequence dropout rate", data.getDropoutRate().toString());
metadata.put("graph filename", dataFilename);
metadata.put("MWM algorithm type", algoType);
metadata.put("matching weight", totalMatchingWeight.toString());
metadata.put("matching weight", matchingWeight.toString());
metadata.put("well populations", wellPopulationsString);
metadata.put("sequence read depth", data.getReadDepth().toString());
metadata.put("sequence read error rate", data.getReadErrorRate().toString());
@@ -317,8 +381,10 @@ public class Simulator implements GraphModificationFunctions {
metadata.put("real sequence collision rate", data.getRealSequenceCollisionRate().toString());
metadata.put("total alphas read from plate", data.getAlphaCount().toString());
metadata.put("total betas read from plate", data.getBetaCount().toString());
metadata.put("initial edges in graph", graphEdgeCount.toString());
metadata.put("alphas in graph (after pre-filtering)", graphAlphaCount.toString());
metadata.put("betas in graph (after pre-filtering)", graphBetaCount.toString());
metadata.put("final edges in graph (after pre-filtering)", filteredGraphEdgeCount.toString());
metadata.put("high overlap threshold for pairing", highThreshold.toString());
metadata.put("low overlap threshold for pairing", lowThreshold.toString());
metadata.put("minimum overlap percent for pairing", minOverlapPercent.toString());
@@ -347,6 +413,7 @@ public class Simulator implements GraphModificationFunctions {
return output;
}
//Commented out CDR1 matching until it's time to re-implement it
// //Simulated matching of CDR1s to CDR3s. Requires MatchingResult from prior run of matchCDR3s.
// public static MatchingResult[] matchCDR1s(List<Integer[]> distinctCells,

View File

@@ -74,4 +74,12 @@ public class Vertex implements Serializable, Comparable<Vertex> {
/**
 * Orders vertices by their integer label.
 *
 * @param other the vertex to compare against
 * @return a negative, zero, or positive value per the Comparable contract
 */
@Override
public int compareTo(Vertex other) {
    // Integer.compare avoids the overflow that plain subtraction can
    // produce when the two labels are near opposite ends of the int range.
    return Integer.compare(this.vertexLabel, other.getVertexLabel());
}
/**
 * @return the vertex's stored potential value; may be {@code null} if it
 *         has not been set. (Presumably a dual/price value used by the
 *         matching algorithms — confirm against Simulator usage.)
 */
public Double getPotential() {
    return this.potential;
}
/**
 * Stores a new potential value on this vertex.
 *
 * @param potential the value to store; {@code null} is accepted and simply stored
 */
public void setPotential(Double potential) {
    this.potential = potential;
}
}