Compare commits
104 Commits
| Author | SHA1 | Date | |
|---|---|---|---|
|
|
4099ec2623 | ||
|
|
7744586e79 | ||
|
|
83eff0d1e7 | ||
|
|
d1810c453d | ||
|
|
187401f2d6 | ||
|
|
678ce99424 | ||
|
|
c21e375303 | ||
|
|
57fe9c1619 | ||
|
|
e1888a99c6 | ||
|
|
bcf5a4c749 | ||
|
|
81d8a12765 | ||
|
|
b5c0568e22 | ||
|
|
b7597cff2a | ||
|
|
7bbeaf7dad | ||
|
|
945b967382 | ||
|
|
a43ee469ea | ||
|
|
161a52aa89 | ||
|
|
9b2ad9da09 | ||
|
|
30a3f6e33d | ||
|
|
8cc1f19da1 | ||
|
|
3efa5c26d8 | ||
|
|
e686d4957b | ||
|
|
fbc0496675 | ||
|
|
0071cafbbd | ||
|
|
3d302cf8ad | ||
|
|
5f5d77b0a4 | ||
|
|
af32be85ee | ||
|
|
58cdf9ae93 | ||
|
|
202ad4c834 | ||
|
|
96d49d0034 | ||
|
|
d8e5f7ece0 | ||
|
|
9c81d919b4 | ||
|
|
70b08e7c22 | ||
|
|
44158d264c | ||
|
|
e97c2989db | ||
|
|
f7709ada73 | ||
|
|
25b37eff48 | ||
|
|
fbbb5a8792 | ||
|
|
4b9d7f8494 | ||
|
|
0de12a3a12 | ||
|
|
3c2ec9002e | ||
|
|
bcf3af5a83 | ||
|
|
fcca22a2f0 | ||
|
|
910de0ce9d | ||
|
|
ef349ea5f6 | ||
|
|
174db66c46 | ||
|
|
b3273855a6 | ||
|
|
51c1bc2551 | ||
|
|
f7d522e95d | ||
|
|
5f0c089b0a | ||
|
|
d3066095d9 | ||
|
|
55a5d9a892 | ||
|
|
49708f2f8a | ||
|
|
c7934ca498 | ||
|
|
8f0ed91cb7 | ||
|
|
40bc2ce88d | ||
|
|
a5a17d1f76 | ||
|
|
0f3ab0fdd7 | ||
|
|
01596ef43a | ||
|
|
cda25a2c62 | ||
|
|
bde6da3076 | ||
|
|
2eede214c0 | ||
|
|
98ce708825 | ||
|
|
e7e85a4542 | ||
|
|
c0dd2d31f2 | ||
|
|
cf103c5223 | ||
|
|
26f66fe139 | ||
|
|
89295777ef | ||
|
|
99c92e6eb5 | ||
|
|
b82176517c | ||
|
|
0657db5653 | ||
|
|
9f0ac227e2 | ||
|
|
54896bc47f | ||
|
|
b19a4d37c2 | ||
|
|
457d643477 | ||
|
|
593dd6c60f | ||
|
|
b8aeeb988f | ||
|
|
b9b13fb75e | ||
|
|
289220e0d0 | ||
|
|
19badac92b | ||
|
|
633334a1b8 | ||
|
|
e308e47578 | ||
|
|
133984276f | ||
|
|
ec6713a1c0 | ||
| 097590cf21 | |||
|
|
f1e4c4f194 | ||
|
|
b6218c3ed3 | ||
|
|
756e5572b9 | ||
|
|
c30167d5ec | ||
|
|
a19525f5bb | ||
|
|
e5803defa3 | ||
|
|
34dc2a5721 | ||
|
|
fd106a0d73 | ||
|
|
22faad3414 | ||
|
|
0b36e2b742 | ||
|
|
9dacd8cd34 | ||
|
|
89687fa849 | ||
|
|
fb443fe958 | ||
|
|
adebe1542e | ||
|
|
882fbfffc6 | ||
|
|
a88cfb8b0d | ||
|
|
deed98e79d | ||
|
|
1a35600f50 | ||
|
|
856063529b |
1
.idea/.name
generated
Normal file
1
.idea/.name
generated
Normal file
@@ -0,0 +1 @@
|
|||||||
|
BiGpairSEQ
|
||||||
27
.idea/artifacts/BiGpairSEQ_Sim_jar.xml
generated
27
.idea/artifacts/BiGpairSEQ_Sim_jar.xml
generated
@@ -1,16 +1,27 @@
|
|||||||
<component name="ArtifactManager">
|
<component name="ArtifactManager">
|
||||||
<artifact type="jar" build-on-make="true" name="BiGpairSEQ_Sim:jar">
|
<artifact type="jar" name="BiGpairSEQ_Sim:jar">
|
||||||
<output-path>$PROJECT_DIR$/out/artifacts/BiGpairSEQ_Sim_jar</output-path>
|
<output-path>$PROJECT_DIR$/out/artifacts/BiGpairSEQ_Sim_jar</output-path>
|
||||||
<root id="archive" name="BiGpairSEQ_Sim.jar">
|
<root id="archive" name="BiGpairSEQ_Sim.jar">
|
||||||
<element id="directory" name="META-INF">
|
<element id="directory" name="META-INF">
|
||||||
<element id="file-copy" path="$PROJECT_DIR$/src/main/java/META-INF/MANIFEST.MF" />
|
<element id="file-copy" path="$PROJECT_DIR$/META-INF/MANIFEST.MF" />
|
||||||
</element>
|
</element>
|
||||||
<element id="module-output" name="BigPairSEQ" />
|
<element id="module-output" name="BiGpairSEQ_Sim" />
|
||||||
<element id="extracted-dir" path="$MAVEN_REPOSITORY$/org/jgrapht/jgrapht-core/1.5.1/jgrapht-core-1.5.1.jar" path-in-jar="/" />
|
<element id="extracted-dir" path="$MAVEN_REPOSITORY$/org/jgrapht/jgrapht-core/1.5.2/jgrapht-core-1.5.2.jar" path-in-jar="/" />
|
||||||
<element id="extracted-dir" path="$MAVEN_REPOSITORY$/org/jheaps/jheaps/0.13/jheaps-0.13.jar" path-in-jar="/" />
|
<element id="extracted-dir" path="$MAVEN_REPOSITORY$/org/apache/commons/commons-rng-sampling/1.6/commons-rng-sampling-1.6.jar" path-in-jar="/" />
|
||||||
<element id="extracted-dir" path="$MAVEN_REPOSITORY$/commons-cli/commons-cli/1.5.0/commons-cli-1.5.0.jar" path-in-jar="/" />
|
<element id="extracted-dir" path="$MAVEN_REPOSITORY$/org/apache/commons/commons-csv/1.14.0/commons-csv-1.14.0.jar" path-in-jar="/" />
|
||||||
<element id="extracted-dir" path="$MAVEN_REPOSITORY$/org/apache/commons/commons-csv/1.9.0/commons-csv-1.9.0.jar" path-in-jar="/" />
|
<element id="extracted-dir" path="$MAVEN_REPOSITORY$/org/jetbrains/annotations/26.0.2/annotations-26.0.2.jar" path-in-jar="/" />
|
||||||
<element id="extracted-dir" path="$MAVEN_REPOSITORY$/org/jetbrains/annotations/23.0.0/annotations-23.0.0.jar" path-in-jar="/" />
|
<element id="extracted-dir" path="$MAVEN_REPOSITORY$/org/jgrapht/jgrapht-io/1.5.2/jgrapht-io-1.5.2.jar" path-in-jar="/" />
|
||||||
|
<element id="extracted-dir" path="$MAVEN_REPOSITORY$/org/apache/commons/commons-rng-simple/1.6/commons-rng-simple-1.6.jar" path-in-jar="/" />
|
||||||
|
<element id="extracted-dir" path="$MAVEN_REPOSITORY$/commons-io/commons-io/2.18.0/commons-io-2.18.0.jar" path-in-jar="/" />
|
||||||
|
<element id="extracted-dir" path="$MAVEN_REPOSITORY$/org/apache/commons/commons-rng-core/1.6/commons-rng-core-1.6.jar" path-in-jar="/" />
|
||||||
|
<element id="extracted-dir" path="$MAVEN_REPOSITORY$/commons-codec/commons-codec/1.18.0/commons-codec-1.18.0.jar" path-in-jar="/" />
|
||||||
|
<element id="extracted-dir" path="$MAVEN_REPOSITORY$/org/apache/commons/commons-rng-client-api/1.6/commons-rng-client-api-1.6.jar" path-in-jar="/" />
|
||||||
|
<element id="extracted-dir" path="$MAVEN_REPOSITORY$/commons-cli/commons-cli/1.9.0/commons-cli-1.9.0.jar" path-in-jar="/" />
|
||||||
|
<element id="extracted-dir" path="$MAVEN_REPOSITORY$/org/apache/commons/commons-lang3/3.12.0/commons-lang3-3.12.0.jar" path-in-jar="/" />
|
||||||
|
<element id="extracted-dir" path="$MAVEN_REPOSITORY$/org/antlr/antlr4-runtime/4.12.0/antlr4-runtime-4.12.0.jar" path-in-jar="/" />
|
||||||
|
<element id="extracted-dir" path="$MAVEN_REPOSITORY$/org/apfloat/apfloat/1.10.1/apfloat-1.10.1.jar" path-in-jar="/" />
|
||||||
|
<element id="extracted-dir" path="$MAVEN_REPOSITORY$/org/apache/commons/commons-text/1.10.0/commons-text-1.10.0.jar" path-in-jar="/" />
|
||||||
|
<element id="extracted-dir" path="$MAVEN_REPOSITORY$/org/jheaps/jheaps/0.14/jheaps-0.14.jar" path-in-jar="/" />
|
||||||
</root>
|
</root>
|
||||||
</artifact>
|
</artifact>
|
||||||
</component>
|
</component>
|
||||||
1
.idea/compiler.xml
generated
1
.idea/compiler.xml
generated
@@ -7,6 +7,7 @@
|
|||||||
<sourceTestOutputDir name="target/generated-test-sources/test-annotations" />
|
<sourceTestOutputDir name="target/generated-test-sources/test-annotations" />
|
||||||
<outputRelativeToContentRoot value="true" />
|
<outputRelativeToContentRoot value="true" />
|
||||||
<module name="BigPairSEQ" />
|
<module name="BigPairSEQ" />
|
||||||
|
<module name="BiGpairSEQ_Sim" />
|
||||||
</profile>
|
</profile>
|
||||||
</annotationProcessing>
|
</annotationProcessing>
|
||||||
</component>
|
</component>
|
||||||
|
|||||||
25
.idea/jarRepositories.xml
generated
25
.idea/jarRepositories.xml
generated
@@ -1,20 +1,35 @@
|
|||||||
<?xml version="1.0" encoding="UTF-8"?>
|
<?xml version="1.0" encoding="UTF-8"?>
|
||||||
<project version="4">
|
<project version="4">
|
||||||
<component name="RemoteRepositoriesConfiguration">
|
<component name="RemoteRepositoriesConfiguration">
|
||||||
|
<remote-repository>
|
||||||
|
<option name="id" value="my-internal-site" />
|
||||||
|
<option name="name" value="my-internal-site" />
|
||||||
|
<option name="url" value="https://myserver/repo" />
|
||||||
|
</remote-repository>
|
||||||
|
<remote-repository>
|
||||||
|
<option name="id" value="central" />
|
||||||
|
<option name="name" value="Central Repository" />
|
||||||
|
<option name="url" value="https://repo1.maven.org/maven2" />
|
||||||
|
</remote-repository>
|
||||||
|
<remote-repository>
|
||||||
|
<option name="id" value="central repo" />
|
||||||
|
<option name="name" value="central repo" />
|
||||||
|
<option name="url" value="https://repo1.maven.org/maven2/" />
|
||||||
|
</remote-repository>
|
||||||
<remote-repository>
|
<remote-repository>
|
||||||
<option name="id" value="central" />
|
<option name="id" value="central" />
|
||||||
<option name="name" value="Central Repository" />
|
<option name="name" value="Central Repository" />
|
||||||
<option name="url" value="https://repo.maven.apache.org/maven2" />
|
<option name="url" value="https://repo.maven.apache.org/maven2" />
|
||||||
</remote-repository>
|
</remote-repository>
|
||||||
<remote-repository>
|
|
||||||
<option name="id" value="central" />
|
|
||||||
<option name="name" value="Maven Central repository" />
|
|
||||||
<option name="url" value="https://repo1.maven.org/maven2" />
|
|
||||||
</remote-repository>
|
|
||||||
<remote-repository>
|
<remote-repository>
|
||||||
<option name="id" value="jboss.community" />
|
<option name="id" value="jboss.community" />
|
||||||
<option name="name" value="JBoss Community repository" />
|
<option name="name" value="JBoss Community repository" />
|
||||||
<option name="url" value="https://repository.jboss.org/nexus/content/repositories/public/" />
|
<option name="url" value="https://repository.jboss.org/nexus/content/repositories/public/" />
|
||||||
</remote-repository>
|
</remote-repository>
|
||||||
|
<remote-repository>
|
||||||
|
<option name="id" value="34d16bdc-85f0-48ee-8e8b-144091765be1" />
|
||||||
|
<option name="name" value="34d16bdc-85f0-48ee-8e8b-144091765be1" />
|
||||||
|
<option name="url" value="https://repository.mulesoft.org/nexus/content/repositories/public/" />
|
||||||
|
</remote-repository>
|
||||||
</component>
|
</component>
|
||||||
</project>
|
</project>
|
||||||
6
.idea/libraries/apache_commons_csv.xml
generated
6
.idea/libraries/apache_commons_csv.xml
generated
@@ -1,8 +1,10 @@
|
|||||||
<component name="libraryTable">
|
<component name="libraryTable">
|
||||||
<library name="apache.commons.csv" type="repository">
|
<library name="apache.commons.csv" type="repository">
|
||||||
<properties maven-id="org.apache.commons:commons-csv:1.9.0" />
|
<properties maven-id="org.apache.commons:commons-csv:1.14.0" />
|
||||||
<CLASSES>
|
<CLASSES>
|
||||||
<root url="jar://$MAVEN_REPOSITORY$/org/apache/commons/commons-csv/1.9.0/commons-csv-1.9.0.jar!/" />
|
<root url="jar://$MAVEN_REPOSITORY$/org/apache/commons/commons-csv/1.14.0/commons-csv-1.14.0.jar!/" />
|
||||||
|
<root url="jar://$MAVEN_REPOSITORY$/commons-io/commons-io/2.18.0/commons-io-2.18.0.jar!/" />
|
||||||
|
<root url="jar://$MAVEN_REPOSITORY$/commons-codec/commons-codec/1.18.0/commons-codec-1.18.0.jar!/" />
|
||||||
</CLASSES>
|
</CLASSES>
|
||||||
<JAVADOC />
|
<JAVADOC />
|
||||||
<SOURCES />
|
<SOURCES />
|
||||||
|
|||||||
4
.idea/libraries/commons_cli.xml
generated
4
.idea/libraries/commons_cli.xml
generated
@@ -1,8 +1,8 @@
|
|||||||
<component name="libraryTable">
|
<component name="libraryTable">
|
||||||
<library name="commons.cli" type="repository">
|
<library name="commons.cli" type="repository">
|
||||||
<properties maven-id="commons-cli:commons-cli:1.5.0" />
|
<properties maven-id="commons-cli:commons-cli:1.9.0" />
|
||||||
<CLASSES>
|
<CLASSES>
|
||||||
<root url="jar://$MAVEN_REPOSITORY$/commons-cli/commons-cli/1.5.0/commons-cli-1.5.0.jar!/" />
|
<root url="jar://$MAVEN_REPOSITORY$/commons-cli/commons-cli/1.9.0/commons-cli-1.9.0.jar!/" />
|
||||||
</CLASSES>
|
</CLASSES>
|
||||||
<JAVADOC />
|
<JAVADOC />
|
||||||
<SOURCES />
|
<SOURCES />
|
||||||
|
|||||||
7
.idea/libraries/jgrapht_core.xml
generated
7
.idea/libraries/jgrapht_core.xml
generated
@@ -1,9 +1,10 @@
|
|||||||
<component name="libraryTable">
|
<component name="libraryTable">
|
||||||
<library name="jgrapht.core" type="repository">
|
<library name="jgrapht.core" type="repository">
|
||||||
<properties maven-id="org.jgrapht:jgrapht-core:1.5.1" />
|
<properties maven-id="org.jgrapht:jgrapht-core:1.5.2" />
|
||||||
<CLASSES>
|
<CLASSES>
|
||||||
<root url="jar://$MAVEN_REPOSITORY$/org/jgrapht/jgrapht-core/1.5.1/jgrapht-core-1.5.1.jar!/" />
|
<root url="jar://$MAVEN_REPOSITORY$/org/jgrapht/jgrapht-core/1.5.2/jgrapht-core-1.5.2.jar!/" />
|
||||||
<root url="jar://$MAVEN_REPOSITORY$/org/jheaps/jheaps/0.13/jheaps-0.13.jar!/" />
|
<root url="jar://$MAVEN_REPOSITORY$/org/jheaps/jheaps/0.14/jheaps-0.14.jar!/" />
|
||||||
|
<root url="jar://$MAVEN_REPOSITORY$/org/apfloat/apfloat/1.10.1/apfloat-1.10.1.jar!/" />
|
||||||
</CLASSES>
|
</CLASSES>
|
||||||
<JAVADOC />
|
<JAVADOC />
|
||||||
<SOURCES />
|
<SOURCES />
|
||||||
|
|||||||
15
.idea/libraries/jgrapht_io.xml
generated
15
.idea/libraries/jgrapht_io.xml
generated
@@ -1,13 +1,14 @@
|
|||||||
<component name="libraryTable">
|
<component name="libraryTable">
|
||||||
<library name="jgrapht.io" type="repository">
|
<library name="jgrapht.io" type="repository">
|
||||||
<properties maven-id="org.jgrapht:jgrapht-io:1.5.1" />
|
<properties maven-id="org.jgrapht:jgrapht-io:1.5.2" />
|
||||||
<CLASSES>
|
<CLASSES>
|
||||||
<root url="jar://$MAVEN_REPOSITORY$/org/jgrapht/jgrapht-io/1.5.1/jgrapht-io-1.5.1.jar!/" />
|
<root url="jar://$MAVEN_REPOSITORY$/org/jgrapht/jgrapht-io/1.5.2/jgrapht-io-1.5.2.jar!/" />
|
||||||
<root url="jar://$MAVEN_REPOSITORY$/org/jgrapht/jgrapht-core/1.5.1/jgrapht-core-1.5.1.jar!/" />
|
<root url="jar://$MAVEN_REPOSITORY$/org/jgrapht/jgrapht-core/1.5.2/jgrapht-core-1.5.2.jar!/" />
|
||||||
<root url="jar://$MAVEN_REPOSITORY$/org/jheaps/jheaps/0.13/jheaps-0.13.jar!/" />
|
<root url="jar://$MAVEN_REPOSITORY$/org/jheaps/jheaps/0.14/jheaps-0.14.jar!/" />
|
||||||
<root url="jar://$MAVEN_REPOSITORY$/org/antlr/antlr4-runtime/4.8-1/antlr4-runtime-4.8-1.jar!/" />
|
<root url="jar://$MAVEN_REPOSITORY$/org/apfloat/apfloat/1.10.1/apfloat-1.10.1.jar!/" />
|
||||||
<root url="jar://$MAVEN_REPOSITORY$/org/apache/commons/commons-text/1.8/commons-text-1.8.jar!/" />
|
<root url="jar://$MAVEN_REPOSITORY$/org/antlr/antlr4-runtime/4.12.0/antlr4-runtime-4.12.0.jar!/" />
|
||||||
<root url="jar://$MAVEN_REPOSITORY$/org/apache/commons/commons-lang3/3.9/commons-lang3-3.9.jar!/" />
|
<root url="jar://$MAVEN_REPOSITORY$/org/apache/commons/commons-text/1.10.0/commons-text-1.10.0.jar!/" />
|
||||||
|
<root url="jar://$MAVEN_REPOSITORY$/org/apache/commons/commons-lang3/3.12.0/commons-lang3-3.12.0.jar!/" />
|
||||||
</CLASSES>
|
</CLASSES>
|
||||||
<JAVADOC />
|
<JAVADOC />
|
||||||
<SOURCES />
|
<SOURCES />
|
||||||
|
|||||||
44
pom.xml
44
pom.xml
@@ -5,7 +5,7 @@
|
|||||||
<modelVersion>4.0.0</modelVersion>
|
<modelVersion>4.0.0</modelVersion>
|
||||||
|
|
||||||
<groupId>org.example</groupId>
|
<groupId>org.example</groupId>
|
||||||
<artifactId>TCellSim</artifactId>
|
<artifactId>BiGpairSEQ_Sim</artifactId>
|
||||||
<version>1.0-SNAPSHOT</version>
|
<version>1.0-SNAPSHOT</version>
|
||||||
<build>
|
<build>
|
||||||
<plugins>
|
<plugins>
|
||||||
@@ -26,8 +26,48 @@
|
|||||||
<version>RELEASE</version>
|
<version>RELEASE</version>
|
||||||
<scope>compile</scope>
|
<scope>compile</scope>
|
||||||
</dependency>
|
</dependency>
|
||||||
|
<!-- https://mvnrepository.com/artifact/org.apache.commons/commons-rng-simple -->
|
||||||
|
<dependency>
|
||||||
|
<groupId>org.apache.commons</groupId>
|
||||||
|
<artifactId>commons-rng-simple</artifactId>
|
||||||
|
<version>1.6</version>
|
||||||
|
</dependency>
|
||||||
|
<dependency>
|
||||||
|
<groupId>org.apache.commons</groupId>
|
||||||
|
<artifactId>commons-rng-sampling</artifactId>
|
||||||
|
<version>1.6</version>
|
||||||
|
</dependency>
|
||||||
|
<!-- https://mvnrepository.com/artifact/org.apache.commons/commons-csv -->
|
||||||
|
<dependency>
|
||||||
|
<groupId>org.apache.commons</groupId>
|
||||||
|
<artifactId>commons-csv</artifactId>
|
||||||
|
<version>1.14.0</version>
|
||||||
|
</dependency>
|
||||||
|
<!-- https://mvnrepository.com/artifact/org.jgrapht/jgrapht-core -->
|
||||||
|
<dependency>
|
||||||
|
<groupId>org.jgrapht</groupId>
|
||||||
|
<artifactId>jgrapht-core</artifactId>
|
||||||
|
<version>1.5.2</version>
|
||||||
|
</dependency>
|
||||||
|
<!-- https://mvnrepository.com/artifact/org.jgrapht/jgrapht-io -->
|
||||||
|
<dependency>
|
||||||
|
<groupId>org.jgrapht</groupId>
|
||||||
|
<artifactId>jgrapht-io</artifactId>
|
||||||
|
<version>1.5.2</version>
|
||||||
|
</dependency>
|
||||||
|
<!-- https://mvnrepository.com/artifact/org.jheaps/jheaps -->
|
||||||
|
<dependency>
|
||||||
|
<groupId>org.jheaps</groupId>
|
||||||
|
<artifactId>jheaps</artifactId>
|
||||||
|
<version>0.14</version>
|
||||||
|
</dependency>
|
||||||
|
<!-- https://mvnrepository.com/artifact/commons-cli/commons-cli -->
|
||||||
|
<dependency>
|
||||||
|
<groupId>commons-cli</groupId>
|
||||||
|
<artifactId>commons-cli</artifactId>
|
||||||
|
<version>1.9.0</version>
|
||||||
|
</dependency>
|
||||||
</dependencies>
|
</dependencies>
|
||||||
|
|
||||||
<properties>
|
<properties>
|
||||||
<maven.compiler.source>11</maven.compiler.source>
|
<maven.compiler.source>11</maven.compiler.source>
|
||||||
<maven.compiler.target>11</maven.compiler.target>
|
<maven.compiler.target>11</maven.compiler.target>
|
||||||
|
|||||||
484
readme.md
484
readme.md
@@ -1,33 +1,113 @@
|
|||||||
# BiGpairSEQ SIMULATOR
|
# BiGpairSEQ SIMULATOR
|
||||||
|
|
||||||
|
## CONTENTS
|
||||||
|
1. [ABOUT](#about)
|
||||||
|
2. [THEORY](#theory)
|
||||||
|
3. [THE BiGpairSEQ ALGORITHM](#the-bigpairseq-algorithm)
|
||||||
|
4. [USAGE](#usage)
|
||||||
|
1. [RUNNING THE PROGRAM](#running-the-program)
|
||||||
|
2. [COMMAND LINE OPTIONS](#command-line-options)
|
||||||
|
3. [INTERACTIVE INTERFACE](#interactive-interface)
|
||||||
|
4. [INPUT/OUTPUT](#input-output)
|
||||||
|
1. [Cell Sample Files](#cell-sample-files)
|
||||||
|
2. [Sample Plate Files](#sample-plate-files)
|
||||||
|
3. [Graph/Data Files](#graph-data-files)
|
||||||
|
4. [Matching Results Files](#matching-results-files)
|
||||||
|
5. [RESULTS](#results)
|
||||||
|
1. [SAMPLE PLATES WITH VARYING NUMBERS OF CELLS PER WELL](#sample-plates-with-varying-numbers-of-cells-per-well)
|
||||||
|
2. [SIMULATING EXPERIMENTS FROM THE 2015 pairSEQ PAPER](#simulating-experiments-from-the-2015-pairseq-paper)
|
||||||
|
1. [EXPERIMENT 1](#experiment-1)
|
||||||
|
2. [EXPERIMENT 3](#experiment-3)
|
||||||
|
6. [CITATIONS](#citations)
|
||||||
|
7. [EXTERNAL LIBRARIES USED](#external-libraries-used)
|
||||||
|
8. [ACKNOWLEDGEMENTS](#acknowledgements)
|
||||||
|
9. [AUTHOR](#author)
|
||||||
|
10. [DISCLOSURE](#disclosure)
|
||||||
|
11. [TODO](#todo)
|
||||||
|
|
||||||
## ABOUT
|
## ABOUT
|
||||||
|
|
||||||
This program simulates BiGpairSEQ (Bipartite Graph pairSEQ), a graph theory-based adaptation
|
This program simulates BiGpairSEQ (Bipartite Graph pairSEQ), a graph theory-based adaptation
|
||||||
of the pairSEQ algorithm (Howie, et al. 2015) for pairing T cell receptor sequences.
|
of the pairSEQ algorithm ([Howie, et al. 2015](#citations)) for pairing T cell receptor sequences.
|
||||||
|
|
||||||
## THEORY
|
## THEORY
|
||||||
|
|
||||||
Unlike pairSEQ, which calculates p-values for every TCR alpha/beta overlap and compares
|
T cell receptors (TCRs) are encoded by pairs of sequences, alpha sequences (TCRAs) and beta sequences (TCRBs). These sequences
|
||||||
against a null distribution, BiGpairSEQ does not do any statistical calculations
|
are extremely diverse; to the first approximation, this pair of sequences uniquely identifies a line of T cells.
|
||||||
directly.
|
|
||||||
|
As described in the original 2015 paper, pairSEQ pairs TCRAs and TCRBs by distributing a
|
||||||
|
sample of T cells across a 96-well sample plate, then sequencing the contents of each well. It then calculates p-values for
|
||||||
|
every TCRA/TCRB sequence overlap and compares that against a null distribution, to find the most statistically probable pairings.
|
||||||
|
|
||||||
|
BiGpairSEQ uses the same fundamental idea of using occupancy overlap to pair TCR sequences, but unlike pairSEQ it
|
||||||
|
does not require performing any statistical calculations at all. Instead, BiGpairSEQ uses graph theory methods which
|
||||||
|
produce provably optimal solutions.
|
||||||
|
|
||||||
BiGpairSEQ creates a [weighted bipartite graph](https://en.wikipedia.org/wiki/Bipartite_graph) representing the sample plate.
|
BiGpairSEQ creates a [weighted bipartite graph](https://en.wikipedia.org/wiki/Bipartite_graph) representing the sample plate.
|
||||||
The distinct TCRA and TCRB sequences form the two sets of vertices. Every TCRA/TCRB pair that share a well
|
The distinct TCRA and TCRB sequences form the two sets of vertices. Every TCRA/TCRB pair that share a well on the sample plate
|
||||||
are connected by an edge, with the edge weight set to the number of wells in which both sequences appear.
|
are connected by an edge in the graph, with the edge weight set to the number of wells in which both sequences appear. The vertices
|
||||||
(Sequences present in *all* wells are filtered out prior to creating the graph, as there is no signal in their occupancy pattern.)
|
themselves are labeled with the occupancy data for the individual sequences they represent, which is useful for pre-filtering
|
||||||
The problem of pairing TCRA/TCRB sequences thus reduces to the "assignment problem" of finding a maximum weight
|
before finding a maximum weight matching. Such a graph fully encodes the distribution data from the sample plate.
|
||||||
matching on a bipartite graph--the subset of vertex-disjoint edges whose weights sum to the maximum possible value.
|
|
||||||
|
|
||||||
This is a well-studied combinatorial optimization problem, with many known solutions.
|
The problem of pairing TCRA/TCRB sequences thus reduces to the [assignment problem](https://en.wikipedia.org/wiki/Assignment_problem) of finding a maximum weight
|
||||||
The most efficient algorithm known to the author for maximum weight matching of a bipartite graph with strictly integral
|
matching (MWM) on a bipartite graph--the subset of vertex-disjoint edges whose weights sum to the maximum possible value.
|
||||||
weights is from Duan and Su (2012). For a graph with m edges, n vertices per side, and maximum integer edge weight N,
|
|
||||||
their algorithm runs in **O(m sqrt(n) log(N))** time. As the graph representation of a pairSEQ experiment is
|
|
||||||
bipartite with integer weights, this algorithm is ideal for BiGpairSEQ.
|
|
||||||
|
|
||||||
Unfortunately, it's a fairly new algorithm, and not yet implemented by the graph theory library used in this simulator.
|
This is a well-studied combinatorial optimization problem, with many known algorithms that produce
|
||||||
So this program instead uses the Fibonacci heap-based algorithm of Fredman and Tarjan (1987), which has a worst-case
|
provably-optimal solutions. The most theoretically efficient algorithm known to the author for maximum weight matching of a bipartite
|
||||||
runtime of **O(n (n log(n) + m))**. The algorithm is implemented as described in Melhorn and Näher (1999).
|
graph with strictly integral weights is from [Duan and Su (2012)](#citations). For a graph with m edges, n vertices per side,
|
||||||
|
and maximum integer edge weight N, their algorithm runs in **O(m sqrt(n) log(N))** time. As the graph representation of
|
||||||
|
a pairSEQ experiment is bipartite with integer weights, this algorithm seems ideal for BiGpairSEQ. Unfortunately, it is not
|
||||||
|
implemented by the graph theory library used in this simulator (JGraphT), and the author has not yet had time to write a
|
||||||
|
full, optimized implementation himself for testing.
|
||||||
|
|
||||||
|
So this program instead uses the [Fibonacci heap](https://en.wikipedia.org/wiki/Fibonacci_heap) based algorithm of Fredman and Tarjan (1987) (essentially
|
||||||
|
[the Hungarian algorithm](https://en.wikipedia.org/wiki/Hungarian_algorithm) augmented with a more efficient priority queue) which has a worst-case
|
||||||
|
runtime of **O(n (n log(n) + m))**. The algorithm is implemented as described in [Melhorn and Näher (1999)](#citations). (The simulator can use either a
|
||||||
|
Fibonacci heap or a [pairing heap](https://en.wikipedia.org/wiki/Pairing_heap) as desired. By default, a pairing heap is used,
|
||||||
|
as in practice they often offer superior performance.)
|
||||||
|
|
||||||
|
One possible advantage of this less efficient algorithm is that the Hungarian algorithm and its variations work with both the balanced and the unbalanced assignment problem
|
||||||
|
(that is, cases where both sides of the bipartite graph have the same number of vertices and those in which they don't.)
|
||||||
|
Many other MWM algorithms only work for the balanced assignment problem. While pairSEQ-style experiments should theoretically
|
||||||
|
be balanced assignment problems, in practice sequence dropout can cause them to be unbalanced. The unbalanced case
|
||||||
|
*can* be reduced to the balanced case, but doing so involves doubling the graph size. Since the current implementation uses only
|
||||||
|
the Hungarian algorithm, graph doubling--which could be challenging with the computational resources available to the
|
||||||
|
author--has not yet been necessary.
|
||||||
|
|
||||||
|
There have been some studies which show that [auction algorithms](https://en.wikipedia.org/wiki/Auction_algorithm) for the assignment problem can have superior performance in
|
||||||
|
real-world implementations, due to their simplicity, than more complex algorithms with better theoretical asymptotic
|
||||||
|
performance. The author has implemented a basic forward auction algorithm, which produces optimal assignment for unbalanced bipartite graphs with
|
||||||
|
integer weights. To allow for unbalanced assignment, this algorithm eschews epsilon-scaling,
|
||||||
|
and as a result is prone to "bidding-wars" which increase run time, making it less efficient than the implementation of
|
||||||
|
the Fredman-Tarjan algorithm in JGraphT. A forward/reverse auction algorithm as developed by Bertsekas and Castañon
|
||||||
|
should be able to handle unbalanced (or, as they call it, asymmetric) assignment much more efficiently, but has yet to be
|
||||||
|
implemented.
|
||||||
|
|
||||||
|
The relative time/space efficiencies of BiGpairSEQ when backed by different MWM algorithms remains an open problem.
|
||||||
|
|
||||||
|
## THE BiGpairSEQ ALGORITHM
|
||||||
|
|
||||||
|
1. Sequence a sample plate of T cells as in pairSEQ.
|
||||||
|
2. Pre-filter the sequence data to reduce error and minimize the size of the necessary graph.
|
||||||
|
1. *Saturating sequence filter*: remove any sequences present in all wells on the sample plate, as there is no signal in the occupancy data of saturating sequences (and each saturating sequence will have an edge to every vertex on the opposite side of the graph, vastly increasing the total graph size).
|
||||||
|
2. *Non-existent sequence filter*: sequencing misreads can pollute the data from the sample plate with non-existent sequences. These can be identified by the discrepancy between their occupancy and their total read count. Assuming sequences are read correctly at least half the time, then a sequence's total read count (R) should be at least half the well occupancy of that sequence (O) times the read depth of the sequencing run (D). Remove any sequences for which R < (O * D) / 2.
|
||||||
|
3. *Misidentified sequence filter*: sequencing misreads can cause one real sequence to be misidentified as a different real sequence. This should be fairly infrequent, but is a problem if it skews a sequence's overall occupancy pattern by causing the sequence to seem to be in a well where it's not. This can be detected by looking for discrepancies in a sequence's per-well read count. On average, the read count for a sequence in an individual well (r) should be equal to its total read count (R) divided by its total well occupancy (O). Remove from the list of wells occupied by a sequence any wells for which r < R / (2 * O).
|
||||||
|
3. Encode the occupancy data from the sample plate as a weighted bipartite graph, where one set of vertices represent the distinct TCRAs and the other set represents distinct TCRBs. Between any TCRA and TCRB that share a well, draw an edge. Assign that edge a weight equal to the total number of wells shared by both sequences.
|
||||||
|
4. Find a maximum weight matching of the bipartite graph, using any [MWM algorithm](https://en.wikipedia.org/wiki/Assignment_problem#Algorithms) that produces a provably optimal result.
|
||||||
|
* If desired, restrict the matching to a subset of the graph. (Example: restricting matching attempts to cases where the occupancy overlap is 4 or more wells--that is, edges with weight >= 4.0.) See below for discussion of why this might be desirable.
|
||||||
|
5. The resultant matching represents the likeliest TCRA/TCRB sequence pairs based on the occupancy pattern of the sample plate.
|
||||||
|
|
||||||
|
It is important to note that a maximum weight matching is not necessarily unique. If two different sets of vertex-disjoint edges
|
||||||
|
sum to the same maximal weight, then an MWM algorithm might find either one of them.
|
||||||
|
|
||||||
|
For example, consider a well that contains four rare sequences found only in that well, two TCRAs and two TCRBs.
|
||||||
|
In the graph, both of those TCRAs would have edges to both TCRBs (and to others of course, but since those edges will have a weight of 1.0,
|
||||||
|
they are unlikely to be paired in an MWM to sequences with total occupancy of more than one well). If these four sequences
|
||||||
|
represent two unique T cells, then only one of the two possible pairings between these sequences is correct. But both
|
||||||
|
the correct and incorrect pairing will add 2.0 to the total graph weight, so either one could be part of a maximum weight matching.
|
||||||
|
|
||||||
|
It is to minimize the number of possible equivalent-weight matchings that one might restrict the algorithm to examining
|
||||||
|
only a subset of the graph, as described in step 4 above.
|
||||||
|
|
||||||
## USAGE
|
## USAGE
|
||||||
|
|
||||||
@@ -48,11 +128,119 @@ For example, to run the program with 32 gigabytes of memory, use the command:
|
|||||||
|
|
||||||
`java -Xmx32G -jar BiGpairSEQ_Sim.jar`
|
`java -Xmx32G -jar BiGpairSEQ_Sim.jar`
|
||||||
|
|
||||||
There are a number of command line options, to allow the program to be used in shell scripts. For a full list,
|
### COMMAND LINE OPTIONS
|
||||||
use the `-help` flag:
|
|
||||||
|
There are a number of command line options, to allow the program to be used in shell scripts. These can be viewed with
|
||||||
|
the `-help` flag:
|
||||||
|
|
||||||
`java -jar BiGpairSEQ_Sim.jar -help`
|
`java -jar BiGpairSEQ_Sim.jar -help`
|
||||||
|
|
||||||
|
```
|
||||||
|
usage: BiGpairSEQ_Sim.jar
|
||||||
|
-cells,--make-cells Makes a cell sample file of distinct T cells
|
||||||
|
-graph,--make-graph Makes a graph/data file. Requires a cell sample
|
||||||
|
file and a sample plate file
|
||||||
|
-help Displays this help menu
|
||||||
|
-match,--match-cdr3 Matches CDR3s. Requires a graph/data file.
|
||||||
|
-plate,--make-plate Makes a sample plate file. Requires a cell sample
|
||||||
|
file.
|
||||||
|
-version Prints the program version number to stdout
|
||||||
|
|
||||||
|
usage: BiGpairSEQ_Sim.jar -cells
|
||||||
|
-d,--diversity-factor <factor> The factor by which unique CDR3s
|
||||||
|
outnumber unique CDR1s
|
||||||
|
-n,--num-cells <number> The number of distinct cells to generate
|
||||||
|
-o,--output-file <filename> Name of output file
|
||||||
|
|
||||||
|
usage: BiGpairSEQ_Sim.jar -plate
|
||||||
|
-c,--cell-file <filename> The cell sample file to use
|
||||||
|
-d,--dropout-rate <rate> The sequence dropout rate due to
|
||||||
|
amplification error. (0.0 - 1.0)
|
||||||
|
-exp <value> If using -zipf flag, exponent value for
|
||||||
|
distribution
|
||||||
|
-exponential Use an exponential distribution for cell
|
||||||
|
sample
|
||||||
|
-gaussian Use a Gaussian distribution for cell sample
|
||||||
|
-lambda <value> If using -exponential flag, lambda value
|
||||||
|
for distribution
|
||||||
|
-o,--output-file <filename> Name of output file
|
||||||
|
-poisson Use a Poisson distribution for cell sample
|
||||||
|
-pop <number [number]...> The well populations for each section of
|
||||||
|
the sample plate. There will be as many
|
||||||
|
sections as there are populations given.
|
||||||
|
-random <min> <max> Randomize well populations on sample plate.
|
||||||
|
Takes two arguments: the minimum possible
|
||||||
|
population and the maximum possible
|
||||||
|
population.
|
||||||
|
-stddev <value> If using -gaussian flag, standard deviation
|
||||||
|
for distribution
|
||||||
|
-w,--wells <number> The number of wells on the sample plate
|
||||||
|
-zipf Use a Zipf distribution for cell sample
|
||||||
|
|
||||||
|
usage: BiGpairSEQ_Sim.jar -graph
|
||||||
|
-c,--cell-file <filename> Cell sample file to use for
|
||||||
|
checking pairing accuracy
|
||||||
|
-err,--read-error-prob <prob> (Optional) The probability that
|
||||||
|
a sequence will be misread. (0.0
|
||||||
|
- 1.0)
|
||||||
|
-errcoll,--error-collision-prob <prob> (Optional) The probability that
|
||||||
|
two misreads will produce the
|
||||||
|
same spurious sequence. (0.0 -
|
||||||
|
1.0)
|
||||||
|
-graphml (Optional) Output GraphML file
|
||||||
|
-nb,--no-binary (Optional) Don't output
|
||||||
|
serialized binary file
|
||||||
|
-o,--output-file <filename> Name of output file
|
||||||
|
-p,--plate-filename <filename> Sample plate file from which to
|
||||||
|
construct graph
|
||||||
|
-rd,--read-depth <depth> (Optional) The number of times
|
||||||
|
to read each sequence.
|
||||||
|
-realcoll,--real-collision-prob <prob> (Optional) The probability that
|
||||||
|
a sequence will be misread as
|
||||||
|
another real sequence. (Only
|
||||||
|
applies to unique misreads;
|
||||||
|
after this has happened once,
|
||||||
|
future error collisions could
|
||||||
|
produce the real sequence again)
|
||||||
|
(0.0 - 1.0)
|
||||||
|
|
||||||
|
usage: BiGpairSEQ_Sim.jar -match
|
||||||
|
-g,--graph-file <filename> The graph/data file to use
|
||||||
|
-max <number> The maximum number of shared wells to
|
||||||
|
attempt to match a sequence pair
|
||||||
|
-maxdiff <number> (Optional) The maximum difference in total
|
||||||
|
occupancy between two sequences to attempt
|
||||||
|
matching.
|
||||||
|
-min <number> The minimum number of shared wells to
|
||||||
|
attempt to match a sequence pair
|
||||||
|
-minpct <percent> (Optional) The minimum percentage of a
|
||||||
|
sequence's total occupancy shared by
|
||||||
|
another sequence to attempt matching. (0 -
|
||||||
|
100)
|
||||||
|
-o,--output-file <filename>    (Optional) Name of the output file.
|
||||||
|
If not present, no file will be written.
|
||||||
|
--print-alphas (Optional) Print the number of distinct
|
||||||
|
alpha sequences to stdout.
|
||||||
|
--print-attempt (Optional) Print the pairing attempt rate
|
||||||
|
to stdout
|
||||||
|
--print-betas (Optional) Print the number of distinct
|
||||||
|
beta sequences to stdout.
|
||||||
|
--print-correct (Optional) Print the number of correct
|
||||||
|
pairs to stdout
|
||||||
|
--print-error (Optional) Print the pairing error rate to
|
||||||
|
stdout
|
||||||
|
--print-incorrect (Optional) Print the number of incorrect
|
||||||
|
pairs to stdout
|
||||||
|
--print-metadata (Optional) Print a full summary of the
|
||||||
|
matching results to stdout.
|
||||||
|
--print-time (Optional) Print the total simulation time
|
||||||
|
to stdout.
|
||||||
|
-pv,--p-value (Optional) Calculate p-values for sequence
|
||||||
|
pairs.
|
||||||
|
```
|
||||||
|
|
||||||
|
### INTERACTIVE INTERFACE
|
||||||
|
|
||||||
If no command line arguments are given, BiGpairSEQ_Sim will launch with an interactive, menu-driven CLI for
|
If no command line arguments are given, BiGpairSEQ_Sim will launch with an interactive, menu-driven CLI for
|
||||||
generating files and simulating TCR pairing. The main menu looks like this:
|
generating files and simulating TCR pairing. The main menu looks like this:
|
||||||
|
|
||||||
@@ -79,7 +267,8 @@ By default, the Options menu looks like this:
|
|||||||
3) Turn on graph/data file caching
|
3) Turn on graph/data file caching
|
||||||
4) Turn off serialized binary graph output
|
4) Turn off serialized binary graph output
|
||||||
5) Turn on GraphML graph output
|
5) Turn on GraphML graph output
|
||||||
6) Maximum weight matching algorithm options
|
6) Turn on calculation of p-values
|
||||||
|
7) Maximum weight matching algorithm options
|
||||||
0) Return to main menu
|
0) Return to main menu
|
||||||
```
|
```
|
||||||
|
|
||||||
@@ -96,7 +285,7 @@ These files are often generated in sequence. When entering filenames, it is not
|
|||||||
(.csv or .ser). When reading or writing files, the program will automatically add the correct extension to any filename
|
(.csv or .ser). When reading or writing files, the program will automatically add the correct extension to any filename
|
||||||
without one.
|
without one.
|
||||||
|
|
||||||
To save file I/O time, the most recent instance of each of these four
|
To save file I/O time when using the interactive interface, the most recent instance of each of these four
|
||||||
files either generated or read from disk can be cached in program memory. When caching is active, subsequent uses of the
|
files either generated or read from disk can be cached in program memory. When caching is active, subsequent uses of the
|
||||||
same data file won't need to be read in again until another file of that type is used or generated,
|
same data file won't need to be read in again until another file of that type is used or generated,
|
||||||
or caching is turned off for that file type. The program checks whether it needs to update its cached data by comparing
|
or caching is turned off for that file type. The program checks whether it needs to update its cached data by comparing
|
||||||
@@ -116,7 +305,7 @@ turned on in the Options menu. By default, GraphML output is OFF.
|
|||||||
Cell Sample files consist of any number of distinct "T cells." Every cell contains
|
Cell Sample files consist of any number of distinct "T cells." Every cell contains
|
||||||
four sequences: Alpha CDR3, Beta CDR3, Alpha CDR1, Beta CDR1. The sequences are represented by
|
four sequences: Alpha CDR3, Beta CDR3, Alpha CDR1, Beta CDR1. The sequences are represented by
|
||||||
random integers. CDR3 Alpha and Beta sequences are all unique within a given Cell Sample file. CDR1 Alpha and Beta sequences
|
random integers. CDR3 Alpha and Beta sequences are all unique within a given Cell Sample file. CDR1 Alpha and Beta sequences
|
||||||
are not necessarily unique; the relative diversity can be set when making the file.
|
are not necessarily unique; the relative diversity of CDR1s with respect to CDR3s can be set when making the file.
|
||||||
|
|
||||||
(Note: though cells still have CDR1 sequences, matching of CDR1s is currently awaiting re-implementation.)
|
(Note: though cells still have CDR1 sequences, matching of CDR1s is currently awaiting re-implementation.)
|
||||||
|
|
||||||
@@ -133,7 +322,7 @@ Structure:
|
|||||||
| Alpha CDR3 | Beta CDR3 | Alpha CDR1 | Beta CDR1 |
|
| Alpha CDR3 | Beta CDR3 | Alpha CDR1 | Beta CDR1 |
|
||||||
|---|---|---|---|
|
|---|---|---|---|
|
||||||
|unique number|unique number|number|number|
|
|unique number|unique number|number|number|
|
||||||
|
| ... | ... |... | ... |
|
||||||
---
|
---
|
||||||
|
|
||||||
#### Sample Plate Files
|
#### Sample Plate Files
|
||||||
@@ -142,7 +331,8 @@ described above). The wells are filled randomly from a Cell Sample file, accordi
|
|||||||
frequency distribution. Additionally, every individual sequence within each cell may, with some
|
frequency distribution. Additionally, every individual sequence within each cell may, with some
|
||||||
given dropout probability, be omitted from the file; this simulates the effect of amplification errors
|
given dropout probability, be omitted from the file; this simulates the effect of amplification errors
|
||||||
prior to sequencing. Plates can also be partitioned into any number of sections, each of which can have a
|
prior to sequencing. Plates can also be partitioned into any number of sections, each of which can have a
|
||||||
different concentration of T cells per well.
|
different concentration of T cells per well. Alternatively, the number of T cells in each well can be randomized between
|
||||||
|
given minimum and maximum population values.
|
||||||
|
|
||||||
Options when making a Sample Plate file:
|
Options when making a Sample Plate file:
|
||||||
* Cell Sample file to use
|
* Cell Sample file to use
|
||||||
@@ -152,7 +342,8 @@ Options when making a Sample Plate file:
|
|||||||
* Standard deviation size
|
* Standard deviation size
|
||||||
* Exponential
|
* Exponential
|
||||||
* Lambda value
|
* Lambda value
|
||||||
* *(Based on the slope of the graph in Figure 4C of the pairSEQ paper, the distribution of the original experiment was approximately exponential with a lambda ~0.6. (Howie, et al. 2015))*
|
* Zipf
|
||||||
|
* Exponent value
|
||||||
* Total number of wells on the plate
|
* Total number of wells on the plate
|
||||||
* Well populations random or fixed
|
* Well populations random or fixed
|
||||||
* If random, minimum and maximum population sizes
|
* If random, minimum and maximum population sizes
|
||||||
@@ -160,7 +351,7 @@ Options when making a Sample Plate file:
|
|||||||
* Number of sections on plate
|
* Number of sections on plate
|
||||||
* Number of T cells per well
|
* Number of T cells per well
|
||||||
* per section, if more than one section
|
* per section, if more than one section
|
||||||
* Dropout rate
|
* Sequence dropout rate
|
||||||
|
|
||||||
Files are in CSV format. There are no header labels. Every row represents a well.
|
Files are in CSV format. There are no header labels. Every row represents a well.
|
||||||
Every value represents an individual cell, containing four sequences, depicted as an array string:
|
Every value represents an individual cell, containing four sequences, depicted as an array string:
|
||||||
@@ -199,11 +390,12 @@ then use it for multiple different BiGpairSEQ simulations.
|
|||||||
|
|
||||||
Options for creating a Graph/Data file:
|
Options for creating a Graph/Data file:
|
||||||
* The Cell Sample file to use
|
* The Cell Sample file to use
|
||||||
* The Sample Plate file to use. (This must have been generated from the selected Cell Sample file.)
|
* The Sample Plate file to use (This must have been generated from the selected Cell Sample file.)
|
||||||
* Whether to simulate sequence read depth. If simulated:
|
* Whether to simulate sequencing read depth. If simulated:
|
||||||
* The read depth (number of times each sequence is read)
|
* The read depth (The number of times each sequence is read)
|
||||||
* The read error rate (probability a sequence is misread)
|
* The read error rate (The probability a sequence is misread)
|
||||||
* The error collision rate (probability two misreads produce the same spurious sequence)
|
* The error collision rate (The probability two misreads produce the same spurious sequence)
|
||||||
|
* The real sequence collision rate (The probability that a misread will produce a different, real sequence from the sample plate. Only applies to new misreads; once an error of this type has occurred, its likelihood of occurring again is dominated by the error collision probability.)
|
||||||
|
|
||||||
These files do not have a human-readable structure, and are not portable to other programs.
|
These files do not have a human-readable structure, and are not portable to other programs.
|
||||||
|
|
||||||
@@ -211,8 +403,8 @@ These files do not have a human-readable structure, and are not portable to othe
|
|||||||
|
|
||||||
For portability of graph data to other software, turn on [GraphML](http://graphml.graphdrawing.org/index.html) output
|
For portability of graph data to other software, turn on [GraphML](http://graphml.graphdrawing.org/index.html) output
|
||||||
in the Options menu in interactive mode, or use the `-graphml` command line argument. This will produce a .graphml file
|
in the Options menu in interactive mode, or use the `-graphml` command line argument. This will produce a .graphml file
|
||||||
for the weighted graph, with vertex attributes for sequence, type, and occupancy data. This graph contains all the data
|
for the weighted graph, with vertex attributes for sequence, type, total occupancy, total read count, and the read count for every individual occupied well.
|
||||||
necessary for the BiGpairSEQ matching algorithm. It does not include the data to measure pairing accuracy; for that,
|
This graph contains all the data necessary for the BiGpairSEQ matching algorithm. It does not include the data to measure pairing accuracy; for that,
|
||||||
compare the matching results to the original Cell Sample .csv file.
|
compare the matching results to the original Cell Sample .csv file.
|
||||||
|
|
||||||
---
|
---
|
||||||
@@ -220,7 +412,7 @@ compare the matching results to the original Cell Sample .csv file.
|
|||||||
#### Matching Results Files
|
#### Matching Results Files
|
||||||
Matching results files consist of the results of a BiGpairSEQ matching simulation. Making them requires a serialized
|
Matching results files consist of the results of a BiGpairSEQ matching simulation. Making them requires a serialized
|
||||||
binary Graph/Data file (.ser). (Because .graphML files are larger than .ser files, BiGpairSEQ_Sim supports .graphML
|
binary Graph/Data file (.ser). (Because .graphML files are larger than .ser files, BiGpairSEQ_Sim supports .graphML
|
||||||
output only. Graph/data input must use a serialized binary.)
|
output only. Graph input must use a serialized binary.)
|
||||||
|
|
||||||
Matching results files are in CSV format. Rows are sequence pairings with extra relevant data. Columns are pairing-specific details.
|
Matching results files are in CSV format. Rows are sequence pairings with extra relevant data. Columns are pairing-specific details.
|
||||||
Metadata about the matching simulation is included as comments. Comments are preceded by `#`.
|
Metadata about the matching simulation is included as comments. Comments are preceded by `#`.
|
||||||
@@ -238,56 +430,66 @@ Options when running a BiGpairSEQ simulation of CDR3 alpha/beta matching:
|
|||||||
Example output:
|
Example output:
|
||||||
|
|
||||||
```
|
```
|
||||||
# Source Sample Plate file: 4MilCellsPlate.csv
|
# cell sample filename: 8MilCells.csv
|
||||||
# Source Graph and Data file: 4MilCellsPlateGraph.ser
|
# cell sample size: 8000000
|
||||||
# T cell counts in sample plate wells: 30000
|
# sample plate filename: 8MilCells_Plate.csv
|
||||||
# Total alphas found: 11813
|
# sample plate well count: 96
|
||||||
# Total betas found: 11808
|
# sequence dropout rate: 0.1
|
||||||
# High overlap threshold: 94
|
# graph filename: 8MilGraph_rd2
|
||||||
# Low overlap threshold: 3
|
# MWM algorithm type: LEDA book with heap: FIBONACCI
|
||||||
# Minimum overlap percent: 0
|
# matching weight: 218017.0
|
||||||
# Maximum occupancy difference: 96
|
# well populations: 4000
|
||||||
# Pairing attempt rate: 0.438
|
# sequence read depth: 100
|
||||||
# Correct pairings: 5151
|
# sequence read error rate: 0.01
|
||||||
# Incorrect pairings: 18
|
# read error collision rate: 0.1
|
||||||
# Pairing error rate: 0.00348
|
# real sequence collision rate: 0.05
|
||||||
# Simulation time: 862 seconds
|
# total alphas read from plate: 323711
|
||||||
|
# total betas read from plate: 323981
|
||||||
|
# alphas in graph (after pre-filtering): 11707
|
||||||
|
# betas in graph (after pre-filtering): 11705
|
||||||
|
# high overlap threshold for pairing: 95
|
||||||
|
# low overlap threshold for pairing: 3
|
||||||
|
# minimum overlap percent for pairing: 0
|
||||||
|
# maximum occupancy difference for pairing: 2147483647
|
||||||
|
# pairing attempt rate: 0.716
|
||||||
|
# correct pairing count: 8373
|
||||||
|
# incorrect pairing count: 7
|
||||||
|
# pairing error rate: 0.000835
|
||||||
|
# time to generate graph (seconds): 293
|
||||||
|
# time to pair sequences (seconds): 1,416
|
||||||
|
# total simulation time (seconds): 1,709
|
||||||
```
|
```
|
||||||
|
|
||||||
| Alpha | Alpha well count | Beta | Beta well count | Overlap count | Matched Correctly? | P-value |
|
| Alpha | Alpha well count | Beta | Beta well count | Overlap count | Matched Correctly? | P-value |
|
||||||
|---|---|---|---|---|---|---|
|
|---|---|---|---|---|---|---|
|
||||||
|5242972|17|1571520|18|17|true|1.41E-18|
|
|10258642|73|1172093|72|70.0|true|4.19E-21|
|
||||||
|5161027|18|2072219|18|18|true|7.31E-20|
|
|6186865|34|4290363|37|34.0|true|4.56E-26|
|
||||||
|4145198|33|1064455|30|29|true|2.65E-21|
|
|10222686|70|11044018|72|68.0|true|9.55E-25|
|
||||||
|7700582|18|112748|18|18|true|7.31E-20|
|
|5338100|75|2422988|76|74.0|true|4.57E-22|
|
||||||
|
|12363907|33|6569852|35|33.0|true|5.70E-26|
|
||||||
|...|...|...|...|...|...|...|
|
|...|...|...|...|...|...|...|
|
||||||
|
|
||||||
---
|
---
|
||||||
|
|
||||||
**NOTE: The p-values in the output are not used for matching**—they aren't part of the BiGpairSEQ algorithm at all.
|
**NOTE: The p-values in the sample output above are not used for matching**—they aren't part of the BiGpairSEQ algorithm at all.
|
||||||
P-values are calculated *after* BiGpairSEQ matching is completed, for purposes of comparison only,
|
P-values (if enabled in the interactive menu options or by using the -pv flag on the command line) are calculated *after*
|
||||||
using the (2021 corrected) formula from the original pairSEQ paper. (Howie, et al. 2015)
|
BiGpairSEQ matching is completed, for purposes of comparison with pairSEQ only, using the (corrected) formula from the
|
||||||
|
original pairSEQ paper. (Howie, et al. 2015) Calculation of p-values is off by default to reduce processing time.
|
||||||
|
|
||||||
|
|
||||||
## PERFORMANCE (old results; need updating to reflect current, improved simulator performance)
|
## RESULTS
|
||||||
|
|
||||||
On a home computer with a Ryzen 5600X CPU, 64GB of 3200MHz DDR4 RAM (half of which was allocated to the Java Virtual Machine), and a PCIe 3.0 SSD, running Linux Mint 20.3 Edge (5.13 kernel),
|
Several BiGpairSEQ simulations were performed on a home computer with the following specs:
|
||||||
the author ran a BiGpairSEQ simulation of a 96-well sample plate with 30,000 T cells/well comprising ~11,800 alphas and betas,
|
|
||||||
taken from a sample of 4,000,000 distinct cells with an exponential frequency distribution (lambda 0.6).
|
|
||||||
|
|
||||||
With min/max occupancy threshold of 3 and 94 wells for matching, and no other pre-filtering, BiGpairSEQ identified 5,151
|
* Ryzen 5600X CPU
|
||||||
correct pairings and 18 incorrect pairings, for an accuracy of 99.652%.
|
* 128GB of 3200MHz DDR4 RAM
|
||||||
|
* 2TB PCIe 3.0 SSD
|
||||||
|
* Linux Mint 21 (5.15 kernel)
|
||||||
|
|
||||||
The total simulation time was 14'22". If intermediate results were held in memory, this would be equivalent to the total elapsed time.
|
### SAMPLE PLATES WITH VARYING NUMBERS OF CELLS PER WELL
|
||||||
|
|
||||||
Since this implementation of BiGpairSEQ writes intermediate results to disk (to improve the efficiency of *repeated* simulations
|
The probability calculations used by pairSEQ require that every well on the sample plate contain the same number of T cells.
|
||||||
with different filtering options), the actual elapsed time was greater. File I/O time was not measured, but took
|
BiGpairSEQ does not share this limitation; it is robust to variations in the number of cells per well.
|
||||||
slightly less time than the simulation itself. Real elapsed time from start to finish was under 30 minutes.
|
|
||||||
|
|
||||||
As mentioned in the theory section, performance could be improved by implementing a more efficient algorithm for finding
|
|
||||||
the maximum weight matching.
|
|
||||||
|
|
||||||
## BEHAVIOR WITH RANDOMIZED WELL POPULATIONS
|
|
||||||
|
|
||||||
A series of BiGpairSEQ simulations were conducted using a cell sample file of 3.5 million unique T cells. From these cells,
|
A series of BiGpairSEQ simulations were conducted using a cell sample file of 3.5 million unique T cells. From these cells,
|
||||||
10 sample plate files were created. All of these sample plates had 96 wells, used an exponential distribution with a lambda of 0.6, and
|
10 sample plate files were created. All of these sample plates had 96 wells, used an exponential distribution with a lambda of 0.6, and
|
||||||
@@ -304,6 +506,9 @@ The well populations of the plates were:
|
|||||||
All BiGpairSEQ simulations were run with a low overlap threshold of 3 and a high overlap threshold of 94.
|
All BiGpairSEQ simulations were run with a low overlap threshold of 3 and a high overlap threshold of 94.
|
||||||
No optional filters were used, so pairing was attempted for all sequences with overlaps within the threshold values.
|
No optional filters were used, so pairing was attempted for all sequences with overlaps within the threshold values.
|
||||||
|
|
||||||
|
NOTE: these results were obtained with an earlier version of BiGpairSEQ_Sim, and should be re-run with the current version.
|
||||||
|
The observed behavior is not believed to be likely to change, however.
|
||||||
|
|
||||||
Constant well population plate results:
|
Constant well population plate results:
|
||||||
|
|
||||||
| |1000 Cell/Well Plate|2000 Cell/Well Plate|3000 Cell/Well Plate|4000 Cell/Well Plate|5000 Cell/Well Plate
|
| |1000 Cell/Well Plate|2000 Cell/Well Plate|3000 Cell/Well Plate|4000 Cell/Well Plate|5000 Cell/Well Plate
|
||||||
@@ -332,8 +537,107 @@ The average results for the randomized plates are closest to the constant plate
|
|||||||
This and several other tests indicate that BiGpairSEQ treats a sample plate with a highly variable number of T cells/well
|
This and several other tests indicate that BiGpairSEQ treats a sample plate with a highly variable number of T cells/well
|
||||||
roughly as though it had a constant well population equal to the plate's average well population.
|
roughly as though it had a constant well population equal to the plate's average well population.
|
||||||
|
|
||||||
|
### SIMULATING EXPERIMENTS FROM THE 2015 pairSEQ PAPER
|
||||||
|
#### Experiment 1
|
||||||
|
This simulation was an attempt to replicate the conditions of experiment 1 from the 2015 pairSEQ paper: a matching was found for a
|
||||||
|
96-well sample plate with 4,000 T cells/well, taken from a sample of 8,400,000
|
||||||
|
distinct cells sampled with an exponential frequency distribution. Examination of Figure 4C from the paper seems to show the points
|
||||||
|
(-5, 4) and (-4.5, 3.3) on the line at the boundary of the shaded region, so a lambda value of 1.4 was used for the
|
||||||
|
exponential distribution.
|
||||||
|
|
||||||
|
The sequence dropout rate was 10%, as the analysis in the paper concluded that most TCR
|
||||||
|
sequences "have less than a 10% chance of going unobserved." (Howie, et al. 2015) Given this choice of 10%, the simulated
|
||||||
|
sample plate is likely to have more sequence dropout, and thus greater error, than the real experiment.
|
||||||
|
|
||||||
|
The original paper does not contain (or the author of this document failed to identify) information on sequencing depth,
|
||||||
|
read error probability, or the probabilities of different kinds of read error collisions. As the pre-filtering of BiGpairSEQ
|
||||||
|
has successfully filtered out all such errors for any reasonable error rates the author has yet tested, this simulation was
|
||||||
|
done without simulating any sequencing errors, to reduce the processing time.
|
||||||
|
|
||||||
|
This simulation was performed 5 times with min/max occupancy thresholds of 3 and 95 wells respectively for matching.
|
||||||
|
|
||||||
|
| |Run 1|Run 2|Run 3|Run 4|Run 5| Average|
|
||||||
|
|---|---|---|---|---|---|---|
|
||||||
|
|Total pairs|4398|4420|4404|4409|4414|4409|
|
||||||
|
|Correct pairs|4322|4313|4337|4336|4339|4329.4|
|
||||||
|
|Incorrect pairs|76|107|67|73|75|79.6|
|
||||||
|
|Error rate|0.0173|0.0242|0.0152|0.0166|0.0170|0.018|
|
||||||
|
|Simulation time (seconds)|697|484|466|473|463|516.6|
|
||||||
|
|
||||||
|
The experiment in the original paper called 4143 pairs with a false discovery rate of 0.01.
|
||||||
|
|
||||||
|
Given the roughness of the estimation for the cell frequency distribution of the original experiment and the likely
|
||||||
|
higher rate of sequence dropout in the simulation, these simulated results match the real experiment fairly well.
|
||||||
|
|
||||||
|
#### Experiment 3
|
||||||
|
To simulate experiment 3 from the original paper, a matching was made for a 96-well sample plate with 160,000 T cells/well,
|
||||||
|
taken from a sample of 4.5 million distinct T cells sampled with an exponential frequency distribution (lambda 1.4). The
|
||||||
|
sequence dropout rate was again 10%, and no sequencing errors were simulated. Once again, deviation from the original
|
||||||
|
experiment is expected due to the roughness of the estimated frequency distribution, and due to the high sequence dropout
|
||||||
|
rate.
|
||||||
|
|
||||||
|
Results metadata:
|
||||||
|
```
|
||||||
|
# total alphas read from plate: 6929
|
||||||
|
# total betas read from plate: 6939
|
||||||
|
# alphas in graph (after pre-filtering): 4452
|
||||||
|
# betas in graph (after pre-filtering): 4461
|
||||||
|
# high overlap threshold for pairing: 95
|
||||||
|
# low overlap threshold for pairing: 3
|
||||||
|
# minimum overlap percent for pairing: 0
|
||||||
|
# maximum occupancy difference for pairing: 100
|
||||||
|
# pairing attempt rate: 0.767
|
||||||
|
# correct pairing count: 3233
|
||||||
|
# incorrect pairing count: 182
|
||||||
|
# pairing error rate: 0.0533
|
||||||
|
# time to generate graph (seconds): 40
|
||||||
|
# time to pair sequences (seconds): 230
|
||||||
|
# total simulation time (seconds): 270
|
||||||
|
```
|
||||||
|
|
||||||
|
The simulation only found 6929 distinct TCRAs and 6939 TCRBs on the sample plate, orders of magnitude fewer than the number of
|
||||||
|
pairs called in the pairSEQ experiment. These results show that at very high sampling depths, the differences in the
|
||||||
|
underlying frequency distribution drastically affect the results. The real distribution clearly has a much longer "tail"
|
||||||
|
than the simulated exponential distribution. Implementing a way to exert finer control over the sampling distribution from
|
||||||
|
the file of distinct cells may enable better simulated replication of this experiment.
|
||||||
|
|
||||||
|
## CITATIONS
|
||||||
|
* Howie, B., Sherwood, A. M., et al. ["High-throughput pairing of T cell receptor alpha and beta sequences."](https://pubmed.ncbi.nlm.nih.gov/26290413/) Sci. Transl. Med. 7, 301ra131 (2015)
|
||||||
|
* Duan, R., Su H. ["A Scaling Algorithm for Maximum Weight Matching in Bipartite Graphs."](https://web.eecs.umich.edu/~pettie/matching/Duan-Su-scaling-bipartite-matching.pdf) Proceedings of the Twenty-Third Annual ACM-SIAM Symposium on Discrete Algorithms, p. 1413-1424. (2012)
|
||||||
|
* Melhorn, K., Näher, St. [The LEDA Platform of Combinatorial and Geometric Computing.](https://people.mpi-inf.mpg.de/~mehlhorn/LEDAbook.html) Cambridge University Press. Chapter 7, Graph Algorithms; p. 132-162 (1999)
|
||||||
|
* Fredman, M., Tarjan, R. ["Fibonacci heaps and their uses in improved network optimization algorithms."](https://www.cl.cam.ac.uk/teaching/1011/AlgorithII/1987-FredmanTar-fibonacci.pdf) J. ACM, 34(3):596–615 (1987)
|
||||||
|
* Bertsekas, D., Castañon, D. ["A forward/reverse auction algorithm for asymmetric assignment problems."](https://www.mit.edu/~dimitrib/For_Rev_Asym_Auction.pdf) Computational Optimization and Applications 1, 277-297 (1992)
|
||||||
|
* Dimitrios Michail, Joris Kinable, Barak Naveh, and John V. Sichi. ["JGraphT—A Java Library for Graph Data Structures and Algorithms."](https://dl.acm.org/doi/10.1145/3381449) ACM Trans. Math. Softw. 46, 2, Article 16 (2020)
|
||||||
|
|
||||||
|
## EXTERNAL LIBRARIES USED
|
||||||
|
* [JGraphT](https://jgrapht.org) -- Graph theory data structures and algorithms
|
||||||
|
* [JHeaps](https://www.jheaps.org) -- For pairing heap priority queue used in maximum weight matching algorithm
|
||||||
|
* [Apache Commons CSV](https://commons.apache.org/proper/commons-csv/) -- For CSV file output
|
||||||
|
* [Apache Commons CLI](https://commons.apache.org/proper/commons-cli/) -- To enable command line arguments for scripting.
|
||||||
|
|
||||||
|
## ACKNOWLEDGEMENTS
|
||||||
|
BiGpairSEQ was conceived in collaboration with the author's spouse, Dr. Alice MacQueen, who brought the original
|
||||||
|
pairSEQ paper to the author's attention and explained all the biology terms he didn't know.
|
||||||
|
|
||||||
|
## AUTHOR
|
||||||
|
BiGpairSEQ algorithm and simulation by Eugene Fischer, 2021. Improvements and documentation, 2022–2025.
|
||||||
|
|
||||||
|
## DISCLOSURE
|
||||||
|
The earliest versions of the BiGpairSEQ simulator were written in 2021 to let Dr. MacQueen test hypothetical extensions
|
||||||
|
of the published pairSEQ protocol while she was interviewing for a position at Adaptive Biotechnologies. She was
|
||||||
|
employed at Adaptive Biotechnologies starting in 2022.
|
||||||
|
|
||||||
|
The author has worked on this BiGpairSEQ simulator since 2021 without Dr. MacQueen's involvement, since she has had
|
||||||
|
access to related, proprietary technologies. The author has had no such access, relying exclusively on the 2015 pairSEQ
|
||||||
|
paper and other academic publications. He continues to work on the BiGpairSEQ simulator recreationally, as it has been
|
||||||
|
a means of exploring some very beautiful math.
|
||||||
|
|
||||||
## TODO
|
## TODO
|
||||||
|
|
||||||
|
* Consider whether a graph database might be a better option than keeping things in memory.
|
||||||
|
* Look at fastUtil for more performant maps and arrays. Note that there is an optional jGraphT library to work with fastUtil (see FastutilMapIntVertexGraph, for example).
|
||||||
|
* Consider implementing an option to use the jGrapht sparse graph representation for a lower memory cost with very large graphs (tens or hundreds of thousands of distinct sequences).
|
||||||
|
* ~~Update CLI option text in this readme to include Zipf distribution options~~
|
||||||
* ~~Try invoking GC at end of workloads to reduce paging to disk~~ DONE
|
* ~~Try invoking GC at end of workloads to reduce paging to disk~~ DONE
|
||||||
* ~~Hold graph data in memory until another graph is read-in? ABANDONED UNABANDONED~~ DONE
|
* ~~Hold graph data in memory until another graph is read-in? ABANDONED UNABANDONED~~ DONE
|
||||||
* ~~*No, this won't work, because BiGpairSEQ simulations alter the underlying graph based on filtering constraints. Changes would cascade with multiple experiments.*~~
|
* ~~*No, this won't work, because BiGpairSEQ simulations alter the underlying graph based on filtering constraints. Changes would cascade with multiple experiments.*~~
|
||||||
@@ -342,7 +646,7 @@ roughly as though it had a constant well population equal to the plate's average
|
|||||||
* ~~Test whether pairing heap (currently used) or Fibonacci heap is more efficient for priority queue in current matching algorithm~~ DONE
|
* ~~Test whether pairing heap (currently used) or Fibonacci heap is more efficient for priority queue in current matching algorithm~~ DONE
|
||||||
* ~~in theory Fibonacci heap should be more efficient, but complexity overhead may eliminate theoretical advantage~~
|
* ~~in theory Fibonacci heap should be more efficient, but complexity overhead may eliminate theoretical advantage~~
|
||||||
* ~~Add controllable heap-type parameter?~~
|
* ~~Add controllable heap-type parameter?~~
|
||||||
* Parameter implemented. Fibonacci heap the current default.
|
* Parameter implemented. Pairing heap the current default.
|
||||||
* ~~Implement sample plates with random numbers of T cells per well.~~ DONE
|
* ~~Implement sample plates with random numbers of T cells per well.~~ DONE
|
||||||
* Possible BiGpairSEQ advantage over pairSEQ: BiGpairSEQ is resilient to variations in well population sizes on a sample plate; pairSEQ is not due to nature of probability calculations.
|
* Possible BiGpairSEQ advantage over pairSEQ: BiGpairSEQ is resilient to variations in well population sizes on a sample plate; pairSEQ is not due to nature of probability calculations.
|
||||||
* preliminary data suggests that BiGpairSEQ behaves roughly as though the whole plate had whatever the *average* well concentration is, but that's still speculative.
|
* preliminary data suggests that BiGpairSEQ behaves roughly as though the whole plate had whatever the *average* well concentration is, but that's still speculative.
|
||||||
@@ -361,40 +665,26 @@ roughly as though it had a constant well population equal to the plate's average
|
|||||||
* ~~Add read depth simulation options to CLI~~ DONE
|
* ~~Add read depth simulation options to CLI~~ DONE
|
||||||
* ~~Update graphml output to reflect current Vertex class attributes~~ DONE
|
* ~~Update graphml output to reflect current Vertex class attributes~~ DONE
|
||||||
* Individual well data from the SequenceRecords could be included, if there's ever a reason for it
|
* Individual well data from the SequenceRecords could be included, if there's ever a reason for it
|
||||||
|
* ~~Implement simulation of sequences being misread as other real sequence~~ DONE
|
||||||
|
* Implement redistributive heap for LEDA matching algorithm to achieve theoretical worst case of O(n(m + n log C)) where C is highest edge weight.
|
||||||
* Update matching metadata output options in CLI
|
* Update matching metadata output options in CLI
|
||||||
|
* Add frequency distribution details to metadata output
|
||||||
|
* need to make an enum for the different distribution types and refactor the Plate class and user interfaces, also add the necessary fields to GraphWithMapData and then call if from Simulator
|
||||||
* Update performance data in this readme
|
* Update performance data in this readme
|
||||||
|
* ~~Add section to ReadMe describing data filtering methods.~~ DONE, now part of algorithm description
|
||||||
* Re-implement CDR1 matching method
|
* Re-implement CDR1 matching method
|
||||||
* Refactor simulator code to collect all needed data in a single scan of the plate
|
* ~~Refactor simulator code to collect all needed data in a single scan of the plate~~ DONE
|
||||||
* Currently it scans once for the vertices and then again for the edge weights. This made simulating read depth awkward, and incompatible with caching of plate files.
|
* ~~Currently it scans once for the vertices and then again for the edge weights. This made simulating read depth awkward, and incompatible with caching of plate files.~~
|
||||||
* This would be a fairly major rewrite of the simulator code, but could make things faster, and would definitely make them cleaner.
|
* ~~This would be a fairly major rewrite of the simulator code, but could make things faster, and would definitely make them cleaner.~~
|
||||||
* Implement Duan and Su's maximum weight matching algorithm
|
* Implement Duan and Su's maximum weight matching algorithm
|
||||||
* Add controllable algorithm-type parameter?
|
* ~~Add controllable algorithm-type parameter?~~ DONE
|
||||||
* This would be fun and valuable, but probably take more time than I have for a hobby project.
|
* This would be fun and valuable, but probably take more time than I have for a hobby project.
|
||||||
* Implement an auction algorithm for maximum weight matching
|
* ~~Implement an auction algorithm for maximum weight matching~~ DONE
|
||||||
|
* Implement a forward/reverse auction algorithm for maximum weight matching
|
||||||
* Implement an algorithm for approximating a maximum weight matching
|
* Implement an algorithm for approximating a maximum weight matching
|
||||||
* Some of these run in linear or near-linear time
|
* Some of these run in linear or near-linear time
|
||||||
* given that the underlying biological samples have many, many sources of error, this would probably be the most useful option in practice. It seems less mathematically elegant, though, and so less fun for me.
|
* given that the underlying biological samples have many, many sources of error, this would probably be the most useful option in practice. It seems less mathematically elegant, though, and so less fun for me.
|
||||||
* Implement Vose's alias method for arbitrary statistical distributions of cells
|
* Implement Vose's alias method for arbitrary statistical distributions of cells
|
||||||
* Should probably refactor to use apache commons rng for this
|
* Should probably refactor to use apache commons rng for this
|
||||||
* Use commons JCS for caching
|
* Use commons JCS for caching
|
||||||
* Parameterize pre-filtering. Currently, sequences present in all wells are filtered out before constructing the graph, which massively reduces graph size. But, ideally, no pre-filtering would be necessary.
|
* Parameterize pre-filtering options
|
||||||
|
|
||||||
|
|
||||||
## CITATIONS
|
|
||||||
* Howie, B., Sherwood, A. M., et al. ["High-throughput pairing of T cell receptor alpha and beta sequences."](https://pubmed.ncbi.nlm.nih.gov/26290413/) Sci. Transl. Med. 7, 301ra131 (2015)
|
|
||||||
* Duan, R., Su H. ["A Scaling Algorithm for Maximum Weight Matching in Bipartite Graphs."](https://web.eecs.umich.edu/~pettie/matching/Duan-Su-scaling-bipartite-matching.pdf) Proceedings of the Twenty-Third Annual ACM-SIAM Symposium on Discrete Algorithms, p. 1413-1424. (2012)
|
|
||||||
* Melhorn, K., Näher, St. [The LEDA Platform of Combinatorial and Geometric Computing.](https://people.mpi-inf.mpg.de/~mehlhorn/LEDAbook.html) Cambridge University Press. Chapter 7, Graph Algorithms; p. 132-162 (1999)
|
|
||||||
* Fredman, M., Tarjan, R. ["Fibonacci heaps and their uses in improved network optimization algorithms."](https://www.cl.cam.ac.uk/teaching/1011/AlgorithII/1987-FredmanTar-fibonacci.pdf) J. ACM, 34(3):596–615 (1987))
|
|
||||||
|
|
||||||
## EXTERNAL LIBRARIES USED
|
|
||||||
* [JGraphT](https://jgrapht.org) -- Graph theory data structures and algorithms
|
|
||||||
* [JHeaps](https://www.jheaps.org) -- For pairing heap priority queue used in maximum weight matching algorithm
|
|
||||||
* [Apache Commons CSV](https://commons.apache.org/proper/commons-csv/) -- For CSV file output
|
|
||||||
* [Apache Commons CLI](https://commons.apache.org/proper/commons-cli/) -- To enable command line arguments for scripting.
|
|
||||||
|
|
||||||
## ACKNOWLEDGEMENTS
|
|
||||||
BiGpairSEQ was conceived in collaboration with Dr. Alice MacQueen, who brought the original
|
|
||||||
pairSEQ paper to the author's attention and explained all the biology terms he didn't know.
|
|
||||||
|
|
||||||
## AUTHOR
|
|
||||||
BiGpairSEQ algorithm and simulation by Eugene Fischer, 2021. UI improvements and documentation, 2022.
|
|
||||||
5
src/main/java/AlgorithmType.java
Normal file
5
src/main/java/AlgorithmType.java
Normal file
@@ -0,0 +1,5 @@
|
|||||||
|
public enum AlgorithmType {
|
||||||
|
HUNGARIAN, //Hungarian algorithm
|
||||||
|
AUCTION, //Forward auction algorithm
|
||||||
|
INTEGER_WEIGHT_SCALING, //integer weight scaling algorithm of Duan and Su
|
||||||
|
}
|
||||||
@@ -13,10 +13,13 @@ public class BiGpairSEQ {
|
|||||||
private static boolean cacheCells = false;
|
private static boolean cacheCells = false;
|
||||||
private static boolean cachePlate = false;
|
private static boolean cachePlate = false;
|
||||||
private static boolean cacheGraph = false;
|
private static boolean cacheGraph = false;
|
||||||
private static HeapType priorityQueueHeapType = HeapType.FIBONACCI;
|
private static AlgorithmType matchingAlgorithmType = AlgorithmType.HUNGARIAN;
|
||||||
|
private static HeapType priorityQueueHeapType = HeapType.PAIRING;
|
||||||
|
private static DistributionType distributionType = DistributionType.ZIPF;
|
||||||
private static boolean outputBinary = true;
|
private static boolean outputBinary = true;
|
||||||
private static boolean outputGraphML = false;
|
private static boolean outputGraphML = false;
|
||||||
private static final String version = "version 3.0";
|
private static boolean calculatePValue = false;
|
||||||
|
private static final String version = "version 4.2";
|
||||||
|
|
||||||
public static void main(String[] args) {
|
public static void main(String[] args) {
|
||||||
if (args.length == 0) {
|
if (args.length == 0) {
|
||||||
@@ -58,6 +61,10 @@ public class BiGpairSEQ {
|
|||||||
return cellFilename;
|
return cellFilename;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
public static DistributionType getDistributionType() {return distributionType;}
|
||||||
|
|
||||||
|
public static void setDistributionType(DistributionType type) {distributionType = type;}
|
||||||
|
|
||||||
public static Plate getPlateInMemory() {
|
public static Plate getPlateInMemory() {
|
||||||
return plateInMemory;
|
return plateInMemory;
|
||||||
}
|
}
|
||||||
@@ -107,7 +114,6 @@ public class BiGpairSEQ {
|
|||||||
return graphFilename;
|
return graphFilename;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|
||||||
public static boolean cacheCells() {
|
public static boolean cacheCells() {
|
||||||
return cacheCells;
|
return cacheCells;
|
||||||
}
|
}
|
||||||
@@ -156,10 +162,18 @@ public class BiGpairSEQ {
|
|||||||
BiGpairSEQ.cacheGraph = cacheGraph;
|
BiGpairSEQ.cacheGraph = cacheGraph;
|
||||||
}
|
}
|
||||||
|
|
||||||
public static String getPriorityQueueHeapType() {
|
public static HeapType getPriorityQueueHeapType() {
|
||||||
return priorityQueueHeapType.name();
|
return priorityQueueHeapType;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
public static AlgorithmType getMatchingAlgorithmType() { return matchingAlgorithmType; }
|
||||||
|
|
||||||
|
public static void setHungarianAlgorithm() { matchingAlgorithmType = AlgorithmType.HUNGARIAN; }
|
||||||
|
|
||||||
|
public static void setIntegerWeightScalingAlgorithm() { matchingAlgorithmType = AlgorithmType.INTEGER_WEIGHT_SCALING; }
|
||||||
|
|
||||||
|
public static void setAuctionAlgorithm() { matchingAlgorithmType = AlgorithmType.AUCTION; }
|
||||||
|
|
||||||
public static void setPairingHeap() {
|
public static void setPairingHeap() {
|
||||||
priorityQueueHeapType = HeapType.PAIRING;
|
priorityQueueHeapType = HeapType.PAIRING;
|
||||||
}
|
}
|
||||||
@@ -173,5 +187,9 @@ public class BiGpairSEQ {
|
|||||||
|
|
||||||
public static boolean outputGraphML() {return outputGraphML;}
|
public static boolean outputGraphML() {return outputGraphML;}
|
||||||
public static void setOutputGraphML(boolean b) {outputGraphML = b;}
|
public static void setOutputGraphML(boolean b) {outputGraphML = b;}
|
||||||
|
|
||||||
|
public static boolean calculatePValue() {return calculatePValue; }
|
||||||
|
public static void setCalculatePValue(boolean b) {calculatePValue = b; }
|
||||||
|
|
||||||
public static String getVersion() { return version; }
|
public static String getVersion() { return version; }
|
||||||
}
|
}
|
||||||
|
|||||||
@@ -58,7 +58,9 @@ public class CellFileReader {
|
|||||||
}
|
}
|
||||||
|
|
||||||
public CellSample getCellSample() {
|
public CellSample getCellSample() {
|
||||||
return new CellSample(distinctCells, cdr1Freq);
|
CellSample sample = new CellSample(distinctCells, cdr1Freq);
|
||||||
|
sample.setFilename(filename);
|
||||||
|
return sample;
|
||||||
}
|
}
|
||||||
|
|
||||||
public String getFilename() { return filename;}
|
public String getFilename() { return filename;}
|
||||||
|
|||||||
@@ -7,6 +7,7 @@ public class CellSample {
|
|||||||
|
|
||||||
private List<String[]> cells;
|
private List<String[]> cells;
|
||||||
private Integer cdr1Freq;
|
private Integer cdr1Freq;
|
||||||
|
private String filename;
|
||||||
|
|
||||||
public CellSample(Integer numDistinctCells, Integer cdr1Freq){
|
public CellSample(Integer numDistinctCells, Integer cdr1Freq){
|
||||||
this.cdr1Freq = cdr1Freq;
|
this.cdr1Freq = cdr1Freq;
|
||||||
@@ -38,6 +39,7 @@ public class CellSample {
|
|||||||
distinctCells.add(tmp);
|
distinctCells.add(tmp);
|
||||||
}
|
}
|
||||||
this.cells = distinctCells;
|
this.cells = distinctCells;
|
||||||
|
this.filename = filename;
|
||||||
}
|
}
|
||||||
|
|
||||||
public CellSample(List<String[]> cells, Integer cdr1Freq){
|
public CellSample(List<String[]> cells, Integer cdr1Freq){
|
||||||
@@ -57,4 +59,8 @@ public class CellSample {
|
|||||||
return cells.size();
|
return cells.size();
|
||||||
}
|
}
|
||||||
|
|
||||||
|
public String getFilename() { return filename; }
|
||||||
|
|
||||||
|
public void setFilename(String filename) { this.filename = filename; }
|
||||||
|
|
||||||
}
|
}
|
||||||
|
|||||||
@@ -48,6 +48,7 @@ import java.util.stream.Stream;
|
|||||||
* minpercent : (optional) the minimum percent overlap to attempt a matching.
|
* minpercent : (optional) the minimum percent overlap to attempt a matching.
|
||||||
* writefile : (optional) the filename to write results to
|
* writefile : (optional) the filename to write results to
|
||||||
* output : the values to print to System.out for piping
|
* output : the values to print to System.out for piping
|
||||||
|
* pv : (optional) calculate p-values
|
||||||
*
|
*
|
||||||
*/
|
*/
|
||||||
public class CommandLineInterface {
|
public class CommandLineInterface {
|
||||||
@@ -96,7 +97,7 @@ public class CommandLineInterface {
|
|||||||
Integer[] populations;
|
Integer[] populations;
|
||||||
String outputFilename = line.getOptionValue("o");
|
String outputFilename = line.getOptionValue("o");
|
||||||
Integer numWells = Integer.parseInt(line.getOptionValue("w"));
|
Integer numWells = Integer.parseInt(line.getOptionValue("w"));
|
||||||
Double dropoutRate = Double.parseDouble(line.getOptionValue("err"));
|
Double dropoutRate = Double.parseDouble(line.getOptionValue("d"));
|
||||||
if (line.hasOption("random")) {
|
if (line.hasOption("random")) {
|
||||||
//Array holding values of minimum and maximum populations
|
//Array holding values of minimum and maximum populations
|
||||||
Integer[] min_max = Stream.of(line.getOptionValues("random"))
|
Integer[] min_max = Stream.of(line.getOptionValues("random"))
|
||||||
@@ -122,16 +123,20 @@ public class CommandLineInterface {
|
|||||||
Plate plate;
|
Plate plate;
|
||||||
if (line.hasOption("poisson")) {
|
if (line.hasOption("poisson")) {
|
||||||
Double stdDev = Math.sqrt(numWells);
|
Double stdDev = Math.sqrt(numWells);
|
||||||
plate = new Plate(cells, cellFilename, numWells, populations, dropoutRate, stdDev, false);
|
plate = new Plate(cells, cellFilename, numWells, populations, dropoutRate, stdDev);
|
||||||
}
|
}
|
||||||
else if (line.hasOption("gaussian")) {
|
else if (line.hasOption("gaussian")) {
|
||||||
Double stdDev = Double.parseDouble(line.getOptionValue("stddev"));
|
Double stdDev = Double.parseDouble(line.getOptionValue("stddev"));
|
||||||
plate = new Plate(cells, cellFilename, numWells, populations, dropoutRate, stdDev, false);
|
plate = new Plate(cells, cellFilename, numWells, populations, dropoutRate, stdDev);
|
||||||
|
}
|
||||||
|
else if (line.hasOption("zipf")) {
|
||||||
|
Double zipfExponent = Double.parseDouble(line.getOptionValue("exp"));
|
||||||
|
plate = new Plate(cells, cellFilename, numWells, populations, dropoutRate, zipfExponent);
|
||||||
}
|
}
|
||||||
else {
|
else {
|
||||||
assert line.hasOption("exponential");
|
assert line.hasOption("exponential");
|
||||||
Double lambda = Double.parseDouble(line.getOptionValue("lambda"));
|
Double lambda = Double.parseDouble(line.getOptionValue("lambda"));
|
||||||
plate = new Plate(cells, cellFilename, numWells, populations, dropoutRate, lambda, true);
|
plate = new Plate(cells, cellFilename, numWells, populations, dropoutRate, lambda);
|
||||||
}
|
}
|
||||||
PlateFileWriter writer = new PlateFileWriter(outputFilename, plate);
|
PlateFileWriter writer = new PlateFileWriter(outputFilename, plate);
|
||||||
writer.writePlateFile();
|
writer.writePlateFile();
|
||||||
@@ -150,16 +155,21 @@ public class CommandLineInterface {
|
|||||||
Integer readDepth = 1;
|
Integer readDepth = 1;
|
||||||
Double readErrorRate = 0.0;
|
Double readErrorRate = 0.0;
|
||||||
Double errorCollisionRate = 0.0;
|
Double errorCollisionRate = 0.0;
|
||||||
|
Double realSequenceCollisionRate = 0.0;
|
||||||
if (line.hasOption("rd")) {
|
if (line.hasOption("rd")) {
|
||||||
readDepth = Integer.parseInt(line.getOptionValue("rd"));
|
readDepth = Integer.parseInt(line.getOptionValue("rd"));
|
||||||
}
|
}
|
||||||
if (line.hasOption("err")) {
|
if (line.hasOption("err")) {
|
||||||
readErrorRate = Double.parseDouble(line.getOptionValue("err"));
|
readErrorRate = Double.parseDouble(line.getOptionValue("err"));
|
||||||
}
|
}
|
||||||
if (line.hasOption("coll")) {
|
if (line.hasOption("errcoll")) {
|
||||||
errorCollisionRate = Double.parseDouble(line.getOptionValue("coll"));
|
errorCollisionRate = Double.parseDouble(line.getOptionValue("errcoll"));
|
||||||
}
|
}
|
||||||
graph = Simulator.makeCDR3Graph(cells, plate, readDepth, readErrorRate, errorCollisionRate, false);
|
if (line.hasOption("realcoll")) {
|
||||||
|
realSequenceCollisionRate = Double.parseDouble(line.getOptionValue("realcoll"));
|
||||||
|
}
|
||||||
|
graph = Simulator.makeCDR3Graph(cells, plate, readDepth, readErrorRate, errorCollisionRate,
|
||||||
|
realSequenceCollisionRate, false);
|
||||||
if (!line.hasOption("no-binary")) { //output binary file unless told not to
|
if (!line.hasOption("no-binary")) { //output binary file unless told not to
|
||||||
GraphDataObjectWriter writer = new GraphDataObjectWriter(outputFilename, graph, false);
|
GraphDataObjectWriter writer = new GraphDataObjectWriter(outputFilename, graph, false);
|
||||||
writer.writeDataToFile();
|
writer.writeDataToFile();
|
||||||
@@ -197,9 +207,12 @@ public class CommandLineInterface {
|
|||||||
else {
|
else {
|
||||||
maxOccupancyDiff = Integer.MAX_VALUE;
|
maxOccupancyDiff = Integer.MAX_VALUE;
|
||||||
}
|
}
|
||||||
|
if (line.hasOption("pv")) {
|
||||||
|
BiGpairSEQ.setCalculatePValue(true);
|
||||||
|
}
|
||||||
GraphWithMapData graph = getGraph(graphFilename);
|
GraphWithMapData graph = getGraph(graphFilename);
|
||||||
MatchingResult result = Simulator.matchCDR3s(graph, graphFilename, minThreshold, maxThreshold,
|
MatchingResult result = Simulator.matchCDR3s(graph, graphFilename, minThreshold, maxThreshold,
|
||||||
maxOccupancyDiff, minOverlapPct, false);
|
maxOccupancyDiff, minOverlapPct, false, BiGpairSEQ.calculatePValue());
|
||||||
if(outputFilename != null){
|
if(outputFilename != null){
|
||||||
MatchingFileWriter writer = new MatchingFileWriter(outputFilename, result);
|
MatchingFileWriter writer = new MatchingFileWriter(outputFilename, result);
|
||||||
writer.writeResultsToFile();
|
writer.writeResultsToFile();
|
||||||
@@ -331,9 +344,13 @@ public class CommandLineInterface {
|
|||||||
Option exponential = Option.builder("exponential")
|
Option exponential = Option.builder("exponential")
|
||||||
.desc("Use an exponential distribution for cell sample")
|
.desc("Use an exponential distribution for cell sample")
|
||||||
.build();
|
.build();
|
||||||
|
Option zipf = Option.builder("zipf")
|
||||||
|
.desc("Use a Zipf distribution for cell sample")
|
||||||
|
.build();
|
||||||
distributions.addOption(poisson);
|
distributions.addOption(poisson);
|
||||||
distributions.addOption(gaussian);
|
distributions.addOption(gaussian);
|
||||||
distributions.addOption(exponential);
|
distributions.addOption(exponential);
|
||||||
|
distributions.addOption(zipf);
|
||||||
//options group for statistical distribution parameters
|
//options group for statistical distribution parameters
|
||||||
OptionGroup statParams = new OptionGroup();// add this to plate options
|
OptionGroup statParams = new OptionGroup();// add this to plate options
|
||||||
Option stdDev = Option.builder("stddev")
|
Option stdDev = Option.builder("stddev")
|
||||||
@@ -346,6 +363,11 @@ public class CommandLineInterface {
|
|||||||
.hasArg()
|
.hasArg()
|
||||||
.argName("value")
|
.argName("value")
|
||||||
.build();
|
.build();
|
||||||
|
Option zipfExponent = Option.builder("exp")
|
||||||
|
.desc("If using -zipf flag, exponent value for distribution")
|
||||||
|
.hasArg()
|
||||||
|
.argName("value")
|
||||||
|
.build();
|
||||||
statParams.addOption(stdDev);
|
statParams.addOption(stdDev);
|
||||||
statParams.addOption(lambda);
|
statParams.addOption(lambda);
|
||||||
//Option group for random plate or set populations
|
//Option group for random plate or set populations
|
||||||
@@ -362,7 +384,8 @@ public class CommandLineInterface {
|
|||||||
.hasArgs()
|
.hasArgs()
|
||||||
.argName("number [number]...")
|
.argName("number [number]...")
|
||||||
.build();
|
.build();
|
||||||
Option dropoutRate = Option.builder("err") //add this to plate options
|
Option dropoutRate = Option.builder("d") //add this to plate options
|
||||||
|
.longOpt("dropout-rate")
|
||||||
.hasArg()
|
.hasArg()
|
||||||
.desc("The sequence dropout rate due to amplification error. (0.0 - 1.0)")
|
.desc("The sequence dropout rate due to amplification error. (0.0 - 1.0)")
|
||||||
.argName("rate")
|
.argName("rate")
|
||||||
@@ -376,6 +399,7 @@ public class CommandLineInterface {
|
|||||||
plateOptions.addOptionGroup(statParams);
|
plateOptions.addOptionGroup(statParams);
|
||||||
plateOptions.addOptionGroup(wellPopOptions);
|
plateOptions.addOptionGroup(wellPopOptions);
|
||||||
plateOptions.addOption(dropoutRate);
|
plateOptions.addOption(dropoutRate);
|
||||||
|
plateOptions.addOption(zipfExponent);
|
||||||
plateOptions.addOption(outputFileOption());
|
plateOptions.addOption(outputFileOption());
|
||||||
return plateOptions;
|
return plateOptions;
|
||||||
}
|
}
|
||||||
@@ -413,12 +437,20 @@ public class CommandLineInterface {
|
|||||||
.hasArg()
|
.hasArg()
|
||||||
.argName("prob")
|
.argName("prob")
|
||||||
.build();
|
.build();
|
||||||
Option errorCollisionProb = Option.builder("coll")
|
Option errorCollisionProb = Option.builder("errcoll")
|
||||||
.longOpt("error-collision-prob")
|
.longOpt("error-collision-prob")
|
||||||
.desc("(Optional) The probability that two misreads will produce the same spurious sequence. (0.0 - 1.0)")
|
.desc("(Optional) The probability that two misreads will produce the same spurious sequence. (0.0 - 1.0)")
|
||||||
.hasArg()
|
.hasArg()
|
||||||
.argName("prob")
|
.argName("prob")
|
||||||
.build();
|
.build();
|
||||||
|
Option realSequenceCollisionProb = Option.builder("realcoll")
|
||||||
|
.longOpt("real-collision-prob")
|
||||||
|
.desc("(Optional) The probability that a sequence will be misread " +
|
||||||
|
"as another real sequence. (Only applies to unique misreads; after this has happened once, " +
|
||||||
|
"future error collisions could produce the real sequence again) (0.0 - 1.0)")
|
||||||
|
.hasArg()
|
||||||
|
.argName("prob")
|
||||||
|
.build();
|
||||||
graphOptions.addOption(cellFilename);
|
graphOptions.addOption(cellFilename);
|
||||||
graphOptions.addOption(plateFilename);
|
graphOptions.addOption(plateFilename);
|
||||||
graphOptions.addOption(outputFileOption());
|
graphOptions.addOption(outputFileOption());
|
||||||
@@ -427,6 +459,7 @@ public class CommandLineInterface {
|
|||||||
graphOptions.addOption(readDepth);
|
graphOptions.addOption(readDepth);
|
||||||
graphOptions.addOption(readErrorProb);
|
graphOptions.addOption(readErrorProb);
|
||||||
graphOptions.addOption(errorCollisionProb);
|
graphOptions.addOption(errorCollisionProb);
|
||||||
|
graphOptions.addOption(realSequenceCollisionProb);
|
||||||
return graphOptions;
|
return graphOptions;
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -464,12 +497,17 @@ public class CommandLineInterface {
|
|||||||
.argName("filename")
|
.argName("filename")
|
||||||
.desc("(Optional) Name of output the output file. If not present, no file will be written.")
|
.desc("(Optional) Name of output the output file. If not present, no file will be written.")
|
||||||
.build();
|
.build();
|
||||||
|
Option pValue = Option.builder("pv") //can't call the method this time, because this one's optional
|
||||||
|
.longOpt("p-value")
|
||||||
|
.desc("(Optional) Calculate p-values for sequence pairs.")
|
||||||
|
.build();
|
||||||
matchCDR3options.addOption(graphFilename)
|
matchCDR3options.addOption(graphFilename)
|
||||||
.addOption(minOccupancyOverlap)
|
.addOption(minOccupancyOverlap)
|
||||||
.addOption(maxOccupancyOverlap)
|
.addOption(maxOccupancyOverlap)
|
||||||
.addOption(minOverlapPercent)
|
.addOption(minOverlapPercent)
|
||||||
.addOption(maxOccupancyDifference)
|
.addOption(maxOccupancyDifference)
|
||||||
.addOption(outputFile);
|
.addOption(outputFile)
|
||||||
|
.addOption(pValue);
|
||||||
|
|
||||||
//options for output to System.out
|
//options for output to System.out
|
||||||
Option printAlphaCount = Option.builder().longOpt("print-alphas")
|
Option printAlphaCount = Option.builder().longOpt("print-alphas")
|
||||||
|
|||||||
6
src/main/java/DistributionType.java
Normal file
6
src/main/java/DistributionType.java
Normal file
@@ -0,0 +1,6 @@
|
|||||||
|
public enum DistributionType {
|
||||||
|
POISSON,
|
||||||
|
GAUSSIAN,
|
||||||
|
EXPONENTIAL,
|
||||||
|
ZIPF
|
||||||
|
}
|
||||||
@@ -5,7 +5,6 @@ import org.jgrapht.nio.AttributeType;
|
|||||||
import org.jgrapht.nio.DefaultAttribute;
|
import org.jgrapht.nio.DefaultAttribute;
|
||||||
import org.jgrapht.nio.graphml.GraphMLExporter;
|
import org.jgrapht.nio.graphml.GraphMLExporter;
|
||||||
import org.jgrapht.nio.graphml.GraphMLExporter.AttributeCategory;
|
import org.jgrapht.nio.graphml.GraphMLExporter.AttributeCategory;
|
||||||
import org.w3c.dom.Attr;
|
|
||||||
|
|
||||||
import java.io.BufferedWriter;
|
import java.io.BufferedWriter;
|
||||||
import java.io.IOException;
|
import java.io.IOException;
|
||||||
@@ -13,6 +12,7 @@ import java.nio.file.Files;
|
|||||||
import java.nio.file.Path;
|
import java.nio.file.Path;
|
||||||
import java.nio.file.StandardOpenOption;
|
import java.nio.file.StandardOpenOption;
|
||||||
import java.util.HashMap;
|
import java.util.HashMap;
|
||||||
|
import java.util.Iterator;
|
||||||
import java.util.Map;
|
import java.util.Map;
|
||||||
|
|
||||||
public class GraphMLFileWriter {
|
public class GraphMLFileWriter {
|
||||||
@@ -41,11 +41,11 @@ public class GraphMLFileWriter {
|
|||||||
}
|
}
|
||||||
|
|
||||||
private Map<String, Attribute> createGraphAttributes(){
|
private Map<String, Attribute> createGraphAttributes(){
|
||||||
Map<String, Attribute> ga = new HashMap<>();
|
Map<String, Attribute> attributes = new HashMap<>();
|
||||||
//Sample plate filename
|
//Sample plate filename
|
||||||
ga.put("sample plate filename", DefaultAttribute.createAttribute(data.getSourceFilename()));
|
attributes.put("sample plate filename", DefaultAttribute.createAttribute(data.getPlateFilename()));
|
||||||
// Number of wells
|
// Number of wells
|
||||||
ga.put("well count", DefaultAttribute.createAttribute(data.getNumWells().toString()));
|
attributes.put("well count", DefaultAttribute.createAttribute(data.getNumWells().toString()));
|
||||||
//Well populations
|
//Well populations
|
||||||
Integer[] wellPopulations = data.getWellPopulations();
|
Integer[] wellPopulations = data.getWellPopulations();
|
||||||
StringBuilder populationsStringBuilder = new StringBuilder();
|
StringBuilder populationsStringBuilder = new StringBuilder();
|
||||||
@@ -55,11 +55,37 @@ public class GraphMLFileWriter {
|
|||||||
populationsStringBuilder.append(wellPopulations[i].toString());
|
populationsStringBuilder.append(wellPopulations[i].toString());
|
||||||
}
|
}
|
||||||
String wellPopulationsString = populationsStringBuilder.toString();
|
String wellPopulationsString = populationsStringBuilder.toString();
|
||||||
ga.put("well populations", DefaultAttribute.createAttribute(wellPopulationsString));
|
attributes.put("well populations", DefaultAttribute.createAttribute(wellPopulationsString));
|
||||||
ga.put("read depth", DefaultAttribute.createAttribute(data.getReadDepth().toString()));
|
attributes.put("read depth", DefaultAttribute.createAttribute(data.getReadDepth().toString()));
|
||||||
ga.put("read error rate", DefaultAttribute.createAttribute(data.getReadErrorRate().toString()));
|
attributes.put("read error rate", DefaultAttribute.createAttribute(data.getReadErrorRate().toString()));
|
||||||
ga.put("error collision rate", DefaultAttribute.createAttribute(data.getErrorCollisionRate().toString()));
|
attributes.put("error collision rate", DefaultAttribute.createAttribute(data.getErrorCollisionRate().toString()));
|
||||||
return ga;
|
attributes.put("real sequence collision rate", DefaultAttribute.createAttribute(data.getRealSequenceCollisionRate()));
|
||||||
|
return attributes;
|
||||||
|
}
|
||||||
|
|
||||||
|
private Map<String, Attribute> createVertexAttributes(Vertex v){
|
||||||
|
Map<String, Attribute> attributes = new HashMap<>();
|
||||||
|
//sequence type
|
||||||
|
attributes.put("type", DefaultAttribute.createAttribute(v.getType().name()));
|
||||||
|
//sequence
|
||||||
|
attributes.put("sequence", DefaultAttribute.createAttribute(v.getSequence()));
|
||||||
|
//number of wells the sequence appears in
|
||||||
|
attributes.put("occupancy", DefaultAttribute.createAttribute(v.getOccupancy()));
|
||||||
|
//total number of times the sequence was read
|
||||||
|
attributes.put("total read count", DefaultAttribute.createAttribute(v.getReadCount()));
|
||||||
|
StringBuilder wellsAndReadCountsBuilder = new StringBuilder();
|
||||||
|
Iterator<Map.Entry<Integer, Integer>> wellOccupancies = v.getWellOccupancies().entrySet().iterator();
|
||||||
|
while (wellOccupancies.hasNext()) {
|
||||||
|
Map.Entry<Integer, Integer> entry = wellOccupancies.next();
|
||||||
|
wellsAndReadCountsBuilder.append(entry.getKey() + ":" + entry.getValue());
|
||||||
|
if (wellOccupancies.hasNext()) {
|
||||||
|
wellsAndReadCountsBuilder.append(", ");
|
||||||
|
}
|
||||||
|
}
|
||||||
|
String wellsAndReadCounts = wellsAndReadCountsBuilder.toString();
|
||||||
|
//the wells the sequence appears in and the read counts in those wells
|
||||||
|
attributes.put("wells:read counts", DefaultAttribute.createAttribute(wellsAndReadCounts));
|
||||||
|
return attributes;
|
||||||
}
|
}
|
||||||
|
|
||||||
public void writeGraphToFile() {
|
public void writeGraphToFile() {
|
||||||
@@ -72,15 +98,7 @@ public class GraphMLFileWriter {
|
|||||||
//Set graph attributes
|
//Set graph attributes
|
||||||
exporter.setGraphAttributeProvider( () -> graphAttributes);
|
exporter.setGraphAttributeProvider( () -> graphAttributes);
|
||||||
//set type, sequence, and occupancy attributes for each vertex
|
//set type, sequence, and occupancy attributes for each vertex
|
||||||
//NEED TO ADD NEW FIELD FOR READ COUNT
|
exporter.setVertexAttributeProvider(this::createVertexAttributes);
|
||||||
exporter.setVertexAttributeProvider( v -> {
|
|
||||||
Map<String, Attribute> attributes = new HashMap<>();
|
|
||||||
attributes.put("type", DefaultAttribute.createAttribute(v.getType().name()));
|
|
||||||
attributes.put("sequence", DefaultAttribute.createAttribute(v.getSequence()));
|
|
||||||
attributes.put("occupancy", DefaultAttribute.createAttribute(v.getOccupancy()));
|
|
||||||
attributes.put("read count", DefaultAttribute.createAttribute(v.getReadCount()));
|
|
||||||
return attributes;
|
|
||||||
});
|
|
||||||
//register the attributes
|
//register the attributes
|
||||||
for(String s : graphAttributes.keySet()) {
|
for(String s : graphAttributes.keySet()) {
|
||||||
exporter.registerAttribute(s, AttributeCategory.GRAPH, AttributeType.STRING);
|
exporter.registerAttribute(s, AttributeCategory.GRAPH, AttributeType.STRING);
|
||||||
@@ -88,7 +106,8 @@ public class GraphMLFileWriter {
|
|||||||
exporter.registerAttribute("type", AttributeCategory.NODE, AttributeType.STRING);
|
exporter.registerAttribute("type", AttributeCategory.NODE, AttributeType.STRING);
|
||||||
exporter.registerAttribute("sequence", AttributeCategory.NODE, AttributeType.STRING);
|
exporter.registerAttribute("sequence", AttributeCategory.NODE, AttributeType.STRING);
|
||||||
exporter.registerAttribute("occupancy", AttributeCategory.NODE, AttributeType.STRING);
|
exporter.registerAttribute("occupancy", AttributeCategory.NODE, AttributeType.STRING);
|
||||||
exporter.registerAttribute("read count", AttributeCategory.NODE, AttributeType.STRING);
|
exporter.registerAttribute("total read count", AttributeCategory.NODE, AttributeType.STRING);
|
||||||
|
exporter.registerAttribute("wells:read counts", AttributeCategory.NODE, AttributeType.STRING);
|
||||||
//export the graph
|
//export the graph
|
||||||
exporter.exportGraph(graph, writer);
|
exporter.exportGraph(graph, writer);
|
||||||
} catch(IOException ex){
|
} catch(IOException ex){
|
||||||
|
|||||||
@@ -1,72 +1,54 @@
|
|||||||
import org.jgrapht.graph.DefaultWeightedEdge;
|
import org.jgrapht.graph.DefaultWeightedEdge;
|
||||||
import org.jgrapht.graph.SimpleWeightedGraph;
|
import org.jgrapht.graph.SimpleWeightedGraph;
|
||||||
|
|
||||||
import java.util.ArrayList;
|
import java.util.*;
|
||||||
import java.util.HashMap;
|
|
||||||
import java.util.List;
|
|
||||||
import java.util.Map;
|
|
||||||
|
|
||||||
public interface GraphModificationFunctions {
|
public interface GraphModificationFunctions {
|
||||||
|
|
||||||
//remove over- and under-weight edges, return removed edges
|
//remove over- and under-weight edges, return removed edges
|
||||||
static Map<Vertex[], Integer> filterByOverlapThresholds(SimpleWeightedGraph<Vertex, DefaultWeightedEdge> graph,
|
static Map<DefaultWeightedEdge, Vertex[]> filterByOverlapThresholds(SimpleWeightedGraph<Vertex, DefaultWeightedEdge> graph,
|
||||||
int low, int high, boolean saveEdges) {
|
int low, int high, boolean saveEdges) {
|
||||||
Map<Vertex[], Integer> removedEdges = new HashMap<>();
|
Map<DefaultWeightedEdge, Vertex[]> removedEdges = new HashMap<>();
|
||||||
|
Set<DefaultWeightedEdge> edgesToRemove = new HashSet<>();
|
||||||
for (DefaultWeightedEdge e : graph.edgeSet()) {
|
for (DefaultWeightedEdge e : graph.edgeSet()) {
|
||||||
if ((graph.getEdgeWeight(e) > high) || (graph.getEdgeWeight(e) < low)) {
|
if ((graph.getEdgeWeight(e) > high) || (graph.getEdgeWeight(e) < low)) {
|
||||||
if(saveEdges) {
|
if(saveEdges) {
|
||||||
Vertex source = graph.getEdgeSource(e);
|
Vertex[] vertices = {graph.getEdgeSource(e), graph.getEdgeTarget(e)};
|
||||||
Vertex target = graph.getEdgeTarget(e);
|
removedEdges.put(e, vertices);
|
||||||
Integer weight = (int) graph.getEdgeWeight(e);
|
|
||||||
Vertex[] edge = {source, target};
|
|
||||||
removedEdges.put(edge, weight);
|
|
||||||
}
|
}
|
||||||
else {
|
edgesToRemove.add(e);
|
||||||
graph.setEdgeWeight(e, 0.0);
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
if(saveEdges) {
|
|
||||||
for (Vertex[] edge : removedEdges.keySet()) {
|
|
||||||
graph.removeEdge(edge[0], edge[1]);
|
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
edgesToRemove.forEach(graph::removeEdge);
|
||||||
return removedEdges;
|
return removedEdges;
|
||||||
}
|
}
|
||||||
|
|
||||||
//Remove edges for pairs with large occupancy discrepancy, return removed edges
|
//Remove edges for pairs with large occupancy discrepancy, return removed edges
|
||||||
static Map<Vertex[], Integer> filterByRelativeOccupancy(SimpleWeightedGraph<Vertex, DefaultWeightedEdge> graph,
|
static Map<DefaultWeightedEdge, Vertex[]> filterByRelativeOccupancy(SimpleWeightedGraph<Vertex, DefaultWeightedEdge> graph,
|
||||||
Integer maxOccupancyDifference, boolean saveEdges) {
|
Integer maxOccupancyDifference, boolean saveEdges) {
|
||||||
Map<Vertex[], Integer> removedEdges = new HashMap<>();
|
Map<DefaultWeightedEdge, Vertex[]> removedEdges = new HashMap<>();
|
||||||
|
Set<DefaultWeightedEdge> edgesToRemove = new HashSet<>();
|
||||||
for (DefaultWeightedEdge e : graph.edgeSet()) {
|
for (DefaultWeightedEdge e : graph.edgeSet()) {
|
||||||
Integer alphaOcc = graph.getEdgeSource(e).getOccupancy();
|
Integer alphaOcc = graph.getEdgeSource(e).getOccupancy();
|
||||||
Integer betaOcc = graph.getEdgeTarget(e).getOccupancy();
|
Integer betaOcc = graph.getEdgeTarget(e).getOccupancy();
|
||||||
if (Math.abs(alphaOcc - betaOcc) >= maxOccupancyDifference) {
|
if (Math.abs(alphaOcc - betaOcc) >= maxOccupancyDifference) {
|
||||||
if (saveEdges) {
|
if (saveEdges) {
|
||||||
Vertex source = graph.getEdgeSource(e);
|
Vertex[] vertices = {graph.getEdgeSource(e), graph.getEdgeTarget(e)};
|
||||||
Vertex target = graph.getEdgeTarget(e);
|
removedEdges.put(e, vertices);
|
||||||
Integer weight = (int) graph.getEdgeWeight(e);
|
|
||||||
Vertex[] edge = {source, target};
|
|
||||||
removedEdges.put(edge, weight);
|
|
||||||
}
|
}
|
||||||
else {
|
edgesToRemove.add(e);
|
||||||
graph.setEdgeWeight(e, 0.0);
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
if(saveEdges) {
|
|
||||||
for (Vertex[] edge : removedEdges.keySet()) {
|
|
||||||
graph.removeEdge(edge[0], edge[1]);
|
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
edgesToRemove.forEach(graph::removeEdge);
|
||||||
return removedEdges;
|
return removedEdges;
|
||||||
}
|
}
|
||||||
|
|
||||||
//Remove edges for pairs where overlap size is significantly lower than the well occupancy, return removed edges
|
//Remove edges for pairs where overlap size is significantly lower than the well occupancy, return removed edges
|
||||||
static Map<Vertex[], Integer> filterByOverlapPercent(SimpleWeightedGraph<Vertex, DefaultWeightedEdge> graph,
|
static Map<DefaultWeightedEdge, Vertex[]> filterByOverlapPercent(SimpleWeightedGraph<Vertex, DefaultWeightedEdge> graph,
|
||||||
Integer minOverlapPercent,
|
Integer minOverlapPercent,
|
||||||
boolean saveEdges) {
|
boolean saveEdges) {
|
||||||
Map<Vertex[], Integer> removedEdges = new HashMap<>();
|
Map<DefaultWeightedEdge, Vertex[]> removedEdges = new HashMap<>();
|
||||||
|
Set<DefaultWeightedEdge> edgesToRemove = new HashSet<>();
|
||||||
for (DefaultWeightedEdge e : graph.edgeSet()) {
|
for (DefaultWeightedEdge e : graph.edgeSet()) {
|
||||||
Integer alphaOcc = graph.getEdgeSource(e).getOccupancy();
|
Integer alphaOcc = graph.getEdgeSource(e).getOccupancy();
|
||||||
Integer betaOcc = graph.getEdgeTarget(e).getOccupancy();
|
Integer betaOcc = graph.getEdgeTarget(e).getOccupancy();
|
||||||
@@ -74,22 +56,13 @@ public interface GraphModificationFunctions {
|
|||||||
double min = minOverlapPercent / 100.0;
|
double min = minOverlapPercent / 100.0;
|
||||||
if ((weight / alphaOcc < min) || (weight / betaOcc < min)) {
|
if ((weight / alphaOcc < min) || (weight / betaOcc < min)) {
|
||||||
if (saveEdges) {
|
if (saveEdges) {
|
||||||
Vertex source = graph.getEdgeSource(e);
|
Vertex[] vertices = {graph.getEdgeSource(e), graph.getEdgeTarget(e)};
|
||||||
Vertex target = graph.getEdgeTarget(e);
|
removedEdges.put(e, vertices);
|
||||||
Integer intWeight = (int) graph.getEdgeWeight(e);
|
|
||||||
Vertex[] edge = {source, target};
|
|
||||||
removedEdges.put(edge, intWeight);
|
|
||||||
}
|
}
|
||||||
else {
|
edgesToRemove.add(e);
|
||||||
graph.setEdgeWeight(e, 0.0);
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
if(saveEdges) {
|
|
||||||
for (Vertex[] edge : removedEdges.keySet()) {
|
|
||||||
graph.removeEdge(edge[0], edge[1]);
|
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
edgesToRemove.forEach(graph::removeEdge);
|
||||||
return removedEdges;
|
return removedEdges;
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -126,10 +99,10 @@ public interface GraphModificationFunctions {
|
|||||||
}
|
}
|
||||||
|
|
||||||
static void addRemovedEdges(SimpleWeightedGraph<Vertex, DefaultWeightedEdge> graph,
|
static void addRemovedEdges(SimpleWeightedGraph<Vertex, DefaultWeightedEdge> graph,
|
||||||
Map<Vertex[], Integer> removedEdges) {
|
Map<DefaultWeightedEdge, Vertex[]> removedEdges) {
|
||||||
for (Vertex[] edge : removedEdges.keySet()) {
|
for (DefaultWeightedEdge edge : removedEdges.keySet()) {
|
||||||
DefaultWeightedEdge e = graph.addEdge(edge[0], edge[1]);
|
Vertex[] vertices = removedEdges.get(edge);
|
||||||
graph.setEdgeWeight(e, removedEdges.get(edge));
|
graph.addEdge(vertices[0], vertices[1], edge);
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|||||||
@@ -9,15 +9,19 @@ import java.util.Map;
|
|||||||
//Custom vertex class means a lot of the map data can now be encoded in the graph itself
|
//Custom vertex class means a lot of the map data can now be encoded in the graph itself
|
||||||
public class GraphWithMapData implements java.io.Serializable {
|
public class GraphWithMapData implements java.io.Serializable {
|
||||||
|
|
||||||
private String sourceFilename;
|
private String cellFilename;
|
||||||
|
private int cellSampleSize;
|
||||||
|
private String plateFilename;
|
||||||
private final SimpleWeightedGraph graph;
|
private final SimpleWeightedGraph graph;
|
||||||
private Integer numWells;
|
private final int numWells;
|
||||||
private Integer[] wellPopulations;
|
private final Integer[] wellPopulations;
|
||||||
private Integer alphaCount;
|
private final int alphaCount;
|
||||||
private Integer betaCount;
|
private final int betaCount;
|
||||||
private int readDepth;
|
private final double dropoutRate;
|
||||||
private double readErrorRate;
|
private final int readDepth;
|
||||||
private double errorCollisionRate;
|
private final double readErrorRate;
|
||||||
|
private final double errorCollisionRate;
|
||||||
|
private final double realSequenceCollisionRate;
|
||||||
private final Map<String, String> distCellsMapAlphaKey;
|
private final Map<String, String> distCellsMapAlphaKey;
|
||||||
// private final Map<Integer, Integer> plateVtoAMap;
|
// private final Map<Integer, Integer> plateVtoAMap;
|
||||||
// private final Map<Integer, Integer> plateVtoBMap;
|
// private final Map<Integer, Integer> plateVtoBMap;
|
||||||
@@ -29,7 +33,8 @@ public class GraphWithMapData implements java.io.Serializable {
|
|||||||
|
|
||||||
public GraphWithMapData(SimpleWeightedGraph graph, Integer numWells, Integer[] wellConcentrations,
|
public GraphWithMapData(SimpleWeightedGraph graph, Integer numWells, Integer[] wellConcentrations,
|
||||||
Map<String, String> distCellsMapAlphaKey, Integer alphaCount, Integer betaCount,
|
Map<String, String> distCellsMapAlphaKey, Integer alphaCount, Integer betaCount,
|
||||||
Integer readDepth, Double readErrorRate, Double errorCollisionRate, Duration time){
|
Double dropoutRate, Integer readDepth, Double readErrorRate, Double errorCollisionRate,
|
||||||
|
Double realSequenceCollisionRate, Duration time){
|
||||||
|
|
||||||
// Map<Integer, Integer> plateVtoAMap,
|
// Map<Integer, Integer> plateVtoAMap,
|
||||||
// Map<Integer,Integer> plateVtoBMap, Map<Integer, Integer> plateAtoVMap,
|
// Map<Integer,Integer> plateVtoBMap, Map<Integer, Integer> plateAtoVMap,
|
||||||
@@ -47,9 +52,11 @@ public class GraphWithMapData implements java.io.Serializable {
|
|||||||
// this.plateBtoVMap = plateBtoVMap;
|
// this.plateBtoVMap = plateBtoVMap;
|
||||||
// this.alphaWellCounts = alphaWellCounts;
|
// this.alphaWellCounts = alphaWellCounts;
|
||||||
// this.betaWellCounts = betaWellCounts;
|
// this.betaWellCounts = betaWellCounts;
|
||||||
|
this.dropoutRate = dropoutRate;
|
||||||
this.readDepth = readDepth;
|
this.readDepth = readDepth;
|
||||||
this.readErrorRate = readErrorRate;
|
this.readErrorRate = readErrorRate;
|
||||||
this.errorCollisionRate = errorCollisionRate;
|
this.errorCollisionRate = errorCollisionRate;
|
||||||
|
this.realSequenceCollisionRate = realSequenceCollisionRate;
|
||||||
this.time = time;
|
this.time = time;
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -107,12 +114,20 @@ public class GraphWithMapData implements java.io.Serializable {
|
|||||||
return time;
|
return time;
|
||||||
}
|
}
|
||||||
|
|
||||||
public void setSourceFilename(String filename) {
|
public void setCellFilename(String filename) { this.cellFilename = filename; }
|
||||||
this.sourceFilename = filename;
|
|
||||||
|
public String getCellFilename() { return this.cellFilename; }
|
||||||
|
|
||||||
|
public Integer getCellSampleSize() { return this.cellSampleSize; }
|
||||||
|
|
||||||
|
public void setCellSampleSize(int size) { this.cellSampleSize = size;}
|
||||||
|
|
||||||
|
public void setPlateFilename(String filename) {
|
||||||
|
this.plateFilename = filename;
|
||||||
}
|
}
|
||||||
|
|
||||||
public String getSourceFilename() {
|
public String getPlateFilename() {
|
||||||
return sourceFilename;
|
return plateFilename;
|
||||||
}
|
}
|
||||||
|
|
||||||
public Double getReadErrorRate() {
|
public Double getReadErrorRate() {
|
||||||
@@ -122,4 +137,8 @@ public class GraphWithMapData implements java.io.Serializable {
|
|||||||
public Double getErrorCollisionRate() {
|
public Double getErrorCollisionRate() {
|
||||||
return errorCollisionRate;
|
return errorCollisionRate;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
public Double getRealSequenceCollisionRate() { return realSequenceCollisionRate; }
|
||||||
|
|
||||||
|
public Double getDropoutRate() { return dropoutRate; }
|
||||||
}
|
}
|
||||||
|
|||||||
@@ -89,14 +89,12 @@ public class InteractiveInterface {
|
|||||||
private static void makePlate() {
|
private static void makePlate() {
|
||||||
String cellFile = null;
|
String cellFile = null;
|
||||||
String filename = null;
|
String filename = null;
|
||||||
Double stdDev = 0.0;
|
Double parameter = 0.0;
|
||||||
Integer numWells = 0;
|
Integer numWells = 0;
|
||||||
Integer numSections;
|
Integer numSections;
|
||||||
Integer[] populations = {1};
|
Integer[] populations = {1};
|
||||||
Double dropOutRate = 0.0;
|
Double dropOutRate = 0.0;
|
||||||
boolean poisson = false;
|
;
|
||||||
boolean exponential = false;
|
|
||||||
double lambda = 1.5;
|
|
||||||
try {
|
try {
|
||||||
System.out.println("\nSimulated sample plates consist of:");
|
System.out.println("\nSimulated sample plates consist of:");
|
||||||
System.out.println("* a number of wells");
|
System.out.println("* a number of wells");
|
||||||
@@ -114,33 +112,46 @@ public class InteractiveInterface {
|
|||||||
System.out.println("1) Poisson");
|
System.out.println("1) Poisson");
|
||||||
System.out.println("2) Gaussian");
|
System.out.println("2) Gaussian");
|
||||||
System.out.println("3) Exponential");
|
System.out.println("3) Exponential");
|
||||||
System.out.println("(Note: approximate distribution in original paper is exponential, lambda = 0.6)");
|
System.out.println("4) Zipf");
|
||||||
System.out.println("(lambda value approximated from slope of log-log graph in figure 4c)");
|
|
||||||
System.out.println("(Note: wider distributions are more memory intensive to match)");
|
System.out.println("(Note: wider distributions are more memory intensive to match)");
|
||||||
System.out.print("Enter selection value: ");
|
System.out.print("Enter selection value: ");
|
||||||
input = sc.nextInt();
|
input = sc.nextInt();
|
||||||
switch (input) {
|
switch (input) {
|
||||||
case 1 -> poisson = true;
|
case 1 -> {
|
||||||
|
BiGpairSEQ.setDistributionType(DistributionType.POISSON);
|
||||||
|
}
|
||||||
case 2 -> {
|
case 2 -> {
|
||||||
|
BiGpairSEQ.setDistributionType(DistributionType.GAUSSIAN);
|
||||||
System.out.println("How many distinct T-cells within one standard deviation of peak frequency?");
|
System.out.println("How many distinct T-cells within one standard deviation of peak frequency?");
|
||||||
System.out.println("(Note: wider distributions are more memory intensive to match)");
|
System.out.println("(Note: wider distributions are more memory intensive to match)");
|
||||||
stdDev = sc.nextDouble();
|
parameter = sc.nextDouble();
|
||||||
if (stdDev <= 0.0) {
|
if (parameter <= 0.0) {
|
||||||
throw new InputMismatchException("Value must be positive.");
|
throw new InputMismatchException("Value must be positive.");
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
case 3 -> {
|
case 3 -> {
|
||||||
exponential = true;
|
BiGpairSEQ.setDistributionType(DistributionType.EXPONENTIAL);
|
||||||
System.out.print("Please enter lambda value for exponential distribution: ");
|
System.out.print("Please enter lambda value for exponential distribution: ");
|
||||||
lambda = sc.nextDouble();
|
parameter = sc.nextDouble();
|
||||||
if (lambda <= 0.0) {
|
if (parameter <= 0.0) {
|
||||||
lambda = 0.6;
|
parameter = 1.4;
|
||||||
System.out.println("Value must be positive. Defaulting to 0.6.");
|
System.out.println("Value must be positive. Defaulting to 1.4.");
|
||||||
|
}
|
||||||
|
}
|
||||||
|
case 4 -> {
|
||||||
|
BiGpairSEQ.setDistributionType(DistributionType.ZIPF);
|
||||||
|
System.out.print("Please enter exponent value for Zipf distribution: ");
|
||||||
|
parameter = sc.nextDouble();
|
||||||
|
if (parameter <= 0.0) {
|
||||||
|
parameter = 1.4;
|
||||||
|
System.out.println("Value must be positive. Defaulting to 1.4.");
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
default -> {
|
default -> {
|
||||||
System.out.println("Invalid input. Defaulting to exponential.");
|
System.out.println("Invalid input. Defaulting to exponential.");
|
||||||
exponential = true;
|
parameter = 1.4;
|
||||||
|
BiGpairSEQ.setDistributionType(DistributionType.EXPONENTIAL);
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
System.out.print("\nNumber of wells on plate: ");
|
System.out.print("\nNumber of wells on plate: ");
|
||||||
@@ -226,17 +237,18 @@ public class InteractiveInterface {
|
|||||||
assert filename != null;
|
assert filename != null;
|
||||||
Plate samplePlate;
|
Plate samplePlate;
|
||||||
PlateFileWriter writer;
|
PlateFileWriter writer;
|
||||||
if(exponential){
|
DistributionType type = BiGpairSEQ.getDistributionType();
|
||||||
samplePlate = new Plate(cells, cellFile, numWells, populations, dropOutRate, lambda, true);
|
switch(type) {
|
||||||
|
case POISSON -> {
|
||||||
|
parameter = Math.sqrt(cells.getCellCount()); //gaussian with square root of elements approximates poisson
|
||||||
|
samplePlate = new Plate(cells, cellFile, numWells, populations, dropOutRate, parameter);
|
||||||
writer = new PlateFileWriter(filename, samplePlate);
|
writer = new PlateFileWriter(filename, samplePlate);
|
||||||
}
|
}
|
||||||
else {
|
default -> {
|
||||||
if (poisson) {
|
samplePlate = new Plate(cells, cellFile, numWells, populations, dropOutRate, parameter);
|
||||||
stdDev = Math.sqrt(cells.getCellCount()); //gaussian with square root of elements approximates poisson
|
|
||||||
}
|
|
||||||
samplePlate = new Plate(cells, cellFile, numWells, populations, dropOutRate, stdDev, false);
|
|
||||||
writer = new PlateFileWriter(filename, samplePlate);
|
writer = new PlateFileWriter(filename, samplePlate);
|
||||||
}
|
}
|
||||||
|
}
|
||||||
System.out.println("Writing Sample Plate to file");
|
System.out.println("Writing Sample Plate to file");
|
||||||
writer.writePlateFile();
|
writer.writePlateFile();
|
||||||
System.out.println("Sample Plate written to file: " + filename);
|
System.out.println("Sample Plate written to file: " + filename);
|
||||||
@@ -255,6 +267,7 @@ public class InteractiveInterface {
|
|||||||
int readDepth = 1;
|
int readDepth = 1;
|
||||||
double readErrorRate = 0.0;
|
double readErrorRate = 0.0;
|
||||||
double errorCollisionRate = 0.0;
|
double errorCollisionRate = 0.0;
|
||||||
|
double realSequenceCollisionRate = 0.0;
|
||||||
try {
|
try {
|
||||||
String str = "\nGenerating bipartite weighted graph encoding occupancy overlap data ";
|
String str = "\nGenerating bipartite weighted graph encoding occupancy overlap data ";
|
||||||
str = str.concat("\nrequires a cell sample file and a sample plate file.");
|
str = str.concat("\nrequires a cell sample file and a sample plate file.");
|
||||||
@@ -264,7 +277,6 @@ public class InteractiveInterface {
|
|||||||
System.out.print("\nPlease enter name of an existing sample plate file: ");
|
System.out.print("\nPlease enter name of an existing sample plate file: ");
|
||||||
plateFile = sc.next();
|
plateFile = sc.next();
|
||||||
System.out.println("\nEnable simulation of sequence read depth and sequence read errors? (y/n)");
|
System.out.println("\nEnable simulation of sequence read depth and sequence read errors? (y/n)");
|
||||||
System.out.println("NOTE: sample plate data cannot be cached when simulating read errors");
|
|
||||||
String ans = sc.next();
|
String ans = sc.next();
|
||||||
Pattern pattern = Pattern.compile("(?:yes|y)", Pattern.CASE_INSENSITIVE);
|
Pattern pattern = Pattern.compile("(?:yes|y)", Pattern.CASE_INSENSITIVE);
|
||||||
Matcher matcher = pattern.matcher(ans);
|
Matcher matcher = pattern.matcher(ans);
|
||||||
@@ -272,25 +284,29 @@ public class InteractiveInterface {
|
|||||||
simulateReadDepth = true;
|
simulateReadDepth = true;
|
||||||
}
|
}
|
||||||
if (simulateReadDepth) {
|
if (simulateReadDepth) {
|
||||||
BiGpairSEQ.setCachePlate(false);
|
System.out.print("\nPlease enter the read depth (the integer number of times a sequence is read): ");
|
||||||
BiGpairSEQ.clearPlateInMemory();
|
|
||||||
System.out.print("\nPlease enter read depth (the integer number of reads per sequence): ");
|
|
||||||
readDepth = sc.nextInt();
|
readDepth = sc.nextInt();
|
||||||
if(readDepth < 1) {
|
if(readDepth < 1) {
|
||||||
throw new InputMismatchException("The read depth must be an integer >= 1");
|
throw new InputMismatchException("The read depth must be an integer >= 1");
|
||||||
}
|
}
|
||||||
System.out.print("\nPlease enter probability of a sequence read error (0.0 to 1.0): ");
|
System.out.println("\nPlease enter the read error probability (0.0 to 1.0)");
|
||||||
|
System.out.print("(The probability that a sequence will be misread): ");
|
||||||
readErrorRate = sc.nextDouble();
|
readErrorRate = sc.nextDouble();
|
||||||
if(readErrorRate < 0.0 || readErrorRate > 1.0) {
|
if(readErrorRate < 0.0 || readErrorRate > 1.0) {
|
||||||
throw new InputMismatchException("The read error rate must be in the range [0.0, 1.0]");
|
throw new InputMismatchException("The read error probability must be in the range [0.0, 1.0]");
|
||||||
}
|
}
|
||||||
System.out.println("\nPlease enter the probability of read error collision");
|
System.out.println("\nPlease enter the error collision probability (0.0 to 1.0)");
|
||||||
System.out.println("(the likelihood that two read errors produce the same spurious sequence)");
|
System.out.print("(The probability of a sequence being misread in a way it has been misread before): ");
|
||||||
System.out.print("(0.0 to 1.0): ");
|
|
||||||
errorCollisionRate = sc.nextDouble();
|
errorCollisionRate = sc.nextDouble();
|
||||||
if(errorCollisionRate < 0.0 || errorCollisionRate > 1.0) {
|
if(errorCollisionRate < 0.0 || errorCollisionRate > 1.0) {
|
||||||
throw new InputMismatchException("The error collision probability must be an in the range [0.0, 1.0]");
|
throw new InputMismatchException("The error collision probability must be an in the range [0.0, 1.0]");
|
||||||
}
|
}
|
||||||
|
System.out.println("\nPlease enter the real sequence collision probability (0.0 to 1.0)");
|
||||||
|
System.out.print("(The probability that a (non-collision) misread produces a different, real sequence): ");
|
||||||
|
realSequenceCollisionRate = sc.nextDouble();
|
||||||
|
if(realSequenceCollisionRate < 0.0 || realSequenceCollisionRate > 1.0) {
|
||||||
|
throw new InputMismatchException("The real sequence collision probability must be an in the range [0.0, 1.0]");
|
||||||
|
}
|
||||||
}
|
}
|
||||||
System.out.println("\nThe graph and occupancy data will be written to a file.");
|
System.out.println("\nThe graph and occupancy data will be written to a file.");
|
||||||
System.out.print("Please enter a name for the output file: ");
|
System.out.print("Please enter a name for the output file: ");
|
||||||
@@ -338,7 +354,8 @@ public class InteractiveInterface {
|
|||||||
System.out.println("Returning to main menu.");
|
System.out.println("Returning to main menu.");
|
||||||
}
|
}
|
||||||
else{
|
else{
|
||||||
GraphWithMapData data = Simulator.makeCDR3Graph(cellSample, plate, readDepth, readErrorRate, errorCollisionRate, true);
|
GraphWithMapData data = Simulator.makeCDR3Graph(cellSample, plate, readDepth, readErrorRate,
|
||||||
|
errorCollisionRate, realSequenceCollisionRate, true);
|
||||||
assert filename != null;
|
assert filename != null;
|
||||||
if(BiGpairSEQ.outputBinary()) {
|
if(BiGpairSEQ.outputBinary()) {
|
||||||
GraphDataObjectWriter dataWriter = new GraphDataObjectWriter(filename, data);
|
GraphDataObjectWriter dataWriter = new GraphDataObjectWriter(filename, data);
|
||||||
@@ -417,7 +434,7 @@ public class InteractiveInterface {
|
|||||||
}
|
}
|
||||||
//simulate matching
|
//simulate matching
|
||||||
MatchingResult results = Simulator.matchCDR3s(data, graphFilename, lowThreshold, highThreshold, maxOccupancyDiff,
|
MatchingResult results = Simulator.matchCDR3s(data, graphFilename, lowThreshold, highThreshold, maxOccupancyDiff,
|
||||||
minOverlapPercent, true);
|
minOverlapPercent, true, BiGpairSEQ.calculatePValue());
|
||||||
//write results to file
|
//write results to file
|
||||||
assert filename != null;
|
assert filename != null;
|
||||||
MatchingFileWriter writer = new MatchingFileWriter(filename, results);
|
MatchingFileWriter writer = new MatchingFileWriter(filename, results);
|
||||||
@@ -539,7 +556,8 @@ public class InteractiveInterface {
|
|||||||
System.out.println("3) Turn " + getOnOff(!BiGpairSEQ.cacheGraph()) + " graph/data file caching");
|
System.out.println("3) Turn " + getOnOff(!BiGpairSEQ.cacheGraph()) + " graph/data file caching");
|
||||||
System.out.println("4) Turn " + getOnOff(!BiGpairSEQ.outputBinary()) + " serialized binary graph output");
|
System.out.println("4) Turn " + getOnOff(!BiGpairSEQ.outputBinary()) + " serialized binary graph output");
|
||||||
System.out.println("5) Turn " + getOnOff(!BiGpairSEQ.outputGraphML()) + " GraphML graph output (for data portability to other programs)");
|
System.out.println("5) Turn " + getOnOff(!BiGpairSEQ.outputGraphML()) + " GraphML graph output (for data portability to other programs)");
|
||||||
System.out.println("6) Maximum weight matching algorithm options");
|
System.out.println("6) Turn " + getOnOff(!BiGpairSEQ.calculatePValue()) + " calculation of p-values");
|
||||||
|
System.out.println("7) Maximum weight matching algorithm options");
|
||||||
System.out.println("0) Return to main menu");
|
System.out.println("0) Return to main menu");
|
||||||
try {
|
try {
|
||||||
input = sc.nextInt();
|
input = sc.nextInt();
|
||||||
@@ -549,7 +567,8 @@ public class InteractiveInterface {
|
|||||||
case 3 -> BiGpairSEQ.setCacheGraph(!BiGpairSEQ.cacheGraph());
|
case 3 -> BiGpairSEQ.setCacheGraph(!BiGpairSEQ.cacheGraph());
|
||||||
case 4 -> BiGpairSEQ.setOutputBinary(!BiGpairSEQ.outputBinary());
|
case 4 -> BiGpairSEQ.setOutputBinary(!BiGpairSEQ.outputBinary());
|
||||||
case 5 -> BiGpairSEQ.setOutputGraphML(!BiGpairSEQ.outputGraphML());
|
case 5 -> BiGpairSEQ.setOutputGraphML(!BiGpairSEQ.outputGraphML());
|
||||||
case 6 -> algorithmOptions();
|
case 6 -> BiGpairSEQ.setCalculatePValue(!BiGpairSEQ.calculatePValue());
|
||||||
|
case 7 -> algorithmOptions();
|
||||||
case 0 -> backToMain = true;
|
case 0 -> backToMain = true;
|
||||||
default -> System.out.println("Invalid input");
|
default -> System.out.println("Invalid input");
|
||||||
}
|
}
|
||||||
@@ -575,24 +594,37 @@ public class InteractiveInterface {
|
|||||||
boolean backToOptions = false;
|
boolean backToOptions = false;
|
||||||
while(!backToOptions) {
|
while(!backToOptions) {
|
||||||
System.out.println("\n---------ALGORITHM OPTIONS----------");
|
System.out.println("\n---------ALGORITHM OPTIONS----------");
|
||||||
System.out.println("1) Use scaling algorithm by Duan and Su.");
|
System.out.println("1) Use Hungarian algorithm with Fibonacci heap priority queue");
|
||||||
System.out.println("2) Use LEDA book algorithm with Fibonacci heap priority queue");
|
System.out.println("2) Use Hungarian algorithm with pairing heap priority queue");
|
||||||
System.out.println("3) Use LEDA book algorithm with pairing heap priority queue");
|
System.out.println("3) Use auction algorithm");
|
||||||
|
System.out.println("4) Use integer weight scaling algorithm by Duan and Su. (buggy, not yet fully implemented!)");
|
||||||
System.out.println("0) Return to Options menu");
|
System.out.println("0) Return to Options menu");
|
||||||
try {
|
try {
|
||||||
input = sc.nextInt();
|
input = sc.nextInt();
|
||||||
switch (input) {
|
switch (input) {
|
||||||
case 1 -> System.out.println("This option is not yet implemented. Choose another.");
|
case 1 -> {
|
||||||
case 2 -> {
|
BiGpairSEQ.setHungarianAlgorithm();
|
||||||
BiGpairSEQ.setFibonacciHeap();
|
BiGpairSEQ.setFibonacciHeap();
|
||||||
System.out.println("MWM algorithm set to LEDA with Fibonacci heap");
|
System.out.println("MWM algorithm set to Hungarian with Fibonacci heap");
|
||||||
|
backToOptions = true;
|
||||||
|
}
|
||||||
|
case 2 -> {
|
||||||
|
BiGpairSEQ.setHungarianAlgorithm();
|
||||||
|
BiGpairSEQ.setPairingHeap();
|
||||||
|
System.out.println("MWM algorithm set to Hungarian with pairing heap");
|
||||||
backToOptions = true;
|
backToOptions = true;
|
||||||
}
|
}
|
||||||
case 3 -> {
|
case 3 -> {
|
||||||
BiGpairSEQ.setPairingHeap();
|
BiGpairSEQ.setAuctionAlgorithm();
|
||||||
System.out.println("MWM algorithm set to LEDA with pairing heap");
|
System.out.println("MWM algorithm set to auction");
|
||||||
backToOptions = true;
|
backToOptions = true;
|
||||||
}
|
}
|
||||||
|
case 4 -> {
|
||||||
|
System.out.println("Scaling integer weight MWM algorithm not yet fully implemented. Sorry.");
|
||||||
|
// BiGpairSEQ.setIntegerWeightScalingAlgorithm();
|
||||||
|
// System.out.println("MWM algorithm set to integer weight scaling algorithm of Duan and Su");
|
||||||
|
// backToOptions = true;
|
||||||
|
}
|
||||||
case 0 -> backToOptions = true;
|
case 0 -> backToOptions = true;
|
||||||
default -> System.out.println("Invalid input");
|
default -> System.out.println("Invalid input");
|
||||||
}
|
}
|
||||||
|
|||||||
177
src/main/java/MaximumIntegerWeightBipartiteAuctionMatching.java
Normal file
177
src/main/java/MaximumIntegerWeightBipartiteAuctionMatching.java
Normal file
@@ -0,0 +1,177 @@
|
|||||||
|
import org.jgrapht.Graph;
|
||||||
|
import org.jgrapht.GraphTests;
|
||||||
|
import org.jgrapht.alg.interfaces.MatchingAlgorithm;
|
||||||
|
|
||||||
|
import java.math.BigDecimal;
|
||||||
|
import java.util.*;
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Maximum weight matching in bipartite graphs with strictly integer edge weights, using a forward auction algorithm.
|
||||||
|
* This implementation uses the Gauss-Seidel version of the forward auction algorithm, in which bids are submitted
|
||||||
|
* one at a time. For any weighted bipartite graph with n vertices in the smaller partition, this algorithm will produce
|
||||||
|
* a matching that is within n*epsilon of being optimal. Using an epsilon = 1/(n+1) ensures that this matching differs
|
||||||
|
* from an optimal matching by <1. Thus, for a bipartite graph with strictly integer weights, this algorithm returns
|
||||||
|
* a maximum weight matching.
|
||||||
|
*
|
||||||
|
* See:
|
||||||
|
* "Towards auction algorithms for large dense assignment problems"
|
||||||
|
* Libor Buš and Pavel Tvrdík, Comput Optim Appl (2009) 43:411-436
|
||||||
|
* https://link.springer.com/article/10.1007/s10589-007-9146-5
|
||||||
|
*
|
||||||
|
* See also:
|
||||||
|
* Many books and papers by Dimitri Bertsekas, including chapter 4 of Linear Network Optimization:
|
||||||
|
* https://web.mit.edu/dimitrib/www/LNets_Full_Book.pdf
|
||||||
|
*
|
||||||
|
* @param <V> the graph vertex type
|
||||||
|
* @param <E> the graph edge type
|
||||||
|
*
|
||||||
|
* @author Eugene Fischer
|
||||||
|
*/
|
||||||
|
|
||||||
|
public class MaximumIntegerWeightBipartiteAuctionMatching<V, E> implements MatchingAlgorithm<V, E> {
|
||||||
|
|
||||||
|
private final Graph<V, E> graph;
|
||||||
|
private final Set<V> partition1;
|
||||||
|
private final Set<V> partition2;
|
||||||
|
private final BigDecimal epsilon;
|
||||||
|
private final Set<E> matching;
|
||||||
|
private BigDecimal matchingWeight;
|
||||||
|
|
||||||
|
private boolean swappedPartitions = false;
|
||||||
|
|
||||||
|
public MaximumIntegerWeightBipartiteAuctionMatching(Graph<V, E> graph, Set<V> partition1, Set<V> partition2) {
|
||||||
|
this.graph = GraphTests.requireUndirected(graph);
|
||||||
|
this.partition1 = Objects.requireNonNull(partition1, "Partition 1 cannot be null");
|
||||||
|
this.partition2 = Objects.requireNonNull(partition2, "Partition 2 cannot be null");
|
||||||
|
int n = Math.max(partition1.size(), partition2.size());
|
||||||
|
this.epsilon = BigDecimal.valueOf(1 / ((double) n + 1)); //The minimum price increase of a bid
|
||||||
|
this.matching = new LinkedHashSet<>();
|
||||||
|
this.matchingWeight = BigDecimal.ZERO;
|
||||||
|
}
|
||||||
|
|
||||||
|
|
||||||
|
/*
|
||||||
|
Method coded using MaximumWeightBipartiteMatching.class from JgraphT as a model
|
||||||
|
*/
|
||||||
|
@Override
|
||||||
|
public Matching<V, E> getMatching() {
|
||||||
|
|
||||||
|
/*
|
||||||
|
* Test input instance
|
||||||
|
*/
|
||||||
|
if (!GraphTests.isSimple(graph)) {
|
||||||
|
throw new IllegalArgumentException("Only simple graphs supported");
|
||||||
|
}
|
||||||
|
if (!GraphTests.isBipartitePartition(graph, partition1, partition2)) {
|
||||||
|
throw new IllegalArgumentException("Graph partition is not bipartite");
|
||||||
|
}
|
||||||
|
|
||||||
|
/*
|
||||||
|
If the two partitions are different sizes, the bidders must be the smaller of the two partitions.
|
||||||
|
*/
|
||||||
|
Set<V> items;
|
||||||
|
Set<V> bidders;
|
||||||
|
if (partition2.size() >= partition1.size()) {
|
||||||
|
bidders = partition1;
|
||||||
|
items = partition2;
|
||||||
|
}
|
||||||
|
else {
|
||||||
|
bidders = partition2;
|
||||||
|
items = partition1;
|
||||||
|
swappedPartitions = true;
|
||||||
|
}
|
||||||
|
|
||||||
|
/*
|
||||||
|
Create a map to track the owner of each item, which is initially null,
|
||||||
|
and a map to track the price of each item, which is initially 0. An
|
||||||
|
Initial price of 0 allows for asymmetric assignment (though does mean
|
||||||
|
that this form of the algorithm cannot take advantage of epsilon-scaling).
|
||||||
|
*/
|
||||||
|
Map<V, V> owners = new HashMap<>();
|
||||||
|
Map<V, BigDecimal> prices = new HashMap<>();
|
||||||
|
for(V item: items) {
|
||||||
|
owners.put(item, null);
|
||||||
|
prices.put(item, BigDecimal.ZERO);
|
||||||
|
}
|
||||||
|
|
||||||
|
//Create a queue of bidders that don't currently own an item, which is initially all of them
|
||||||
|
Queue<V> unmatchedBidders = new ArrayDeque<>();
|
||||||
|
for(V bidder: bidders) {
|
||||||
|
unmatchedBidders.offer(bidder);
|
||||||
|
}
|
||||||
|
|
||||||
|
//Run the auction while there are remaining unmatched bidders
|
||||||
|
while (unmatchedBidders.size() > 0) {
|
||||||
|
V bidder = unmatchedBidders.poll();
|
||||||
|
V item = null;
|
||||||
|
BigDecimal bestValue = BigDecimal.valueOf(-1.0);
|
||||||
|
BigDecimal runnerUpValue = BigDecimal.valueOf(-1.0);
|
||||||
|
/*
|
||||||
|
Find the items that offer the best and second-best value for the bidder,
|
||||||
|
then submit a bid equal to the price of the best-valued item plus the marginal value over
|
||||||
|
the second-best-valued item plus epsilon.
|
||||||
|
*/
|
||||||
|
for (E edge: graph.edgesOf(bidder)) {
|
||||||
|
double weight = graph.getEdgeWeight(edge);
|
||||||
|
if(weight == 0.0) {
|
||||||
|
continue;
|
||||||
|
}
|
||||||
|
V tmp = getItem(edge);
|
||||||
|
BigDecimal value = BigDecimal.valueOf(weight).subtract(prices.get(tmp));
|
||||||
|
if (value.compareTo(bestValue) >= 0) {
|
||||||
|
runnerUpValue = bestValue;
|
||||||
|
bestValue = value;
|
||||||
|
item = tmp;
|
||||||
|
}
|
||||||
|
else if (value.compareTo(runnerUpValue) >= 0) {
|
||||||
|
runnerUpValue = value;
|
||||||
|
}
|
||||||
|
}
|
||||||
|
if(bestValue.compareTo(BigDecimal.ZERO) >= 0) {
|
||||||
|
V formerOwner = owners.get(item);
|
||||||
|
BigDecimal price = prices.get(item);
|
||||||
|
BigDecimal bid = price.add(bestValue).subtract(runnerUpValue).add(epsilon);
|
||||||
|
if (formerOwner != null) {
|
||||||
|
unmatchedBidders.offer(formerOwner);
|
||||||
|
}
|
||||||
|
owners.put(item, bidder);
|
||||||
|
prices.put(item, bid);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
//Add all edges between items and their owners to the matching
|
||||||
|
for (V item: owners.keySet()) {
|
||||||
|
if (owners.get(item) != null) {
|
||||||
|
matching.add(graph.getEdge(item, owners.get(item)));
|
||||||
|
}
|
||||||
|
}
|
||||||
|
//Sum the edges of the matching to obtain the matching weight
|
||||||
|
for(E edge: matching) {
|
||||||
|
this.matchingWeight = this.matchingWeight.add(BigDecimal.valueOf(graph.getEdgeWeight(edge)));
|
||||||
|
}
|
||||||
|
|
||||||
|
return new MatchingImpl<>(graph, matching, matchingWeight.doubleValue());
|
||||||
|
}
|
||||||
|
|
||||||
|
private V getItem(E edge) {
|
||||||
|
if (swappedPartitions) {
|
||||||
|
return graph.getEdgeSource(edge);
|
||||||
|
}
|
||||||
|
else {
|
||||||
|
return graph.getEdgeTarget(edge);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// //method for implementing a forward-reverse auction algorithm, not used here
|
||||||
|
// private V getBidder(E edge) {
|
||||||
|
// if (swappedPartitions) {
|
||||||
|
// return graph.getEdgeTarget(edge);
|
||||||
|
// }
|
||||||
|
// else {
|
||||||
|
// return graph.getEdgeSource(edge);
|
||||||
|
// }
|
||||||
|
// }
|
||||||
|
|
||||||
|
public BigDecimal getMatchingWeight() {
|
||||||
|
return matchingWeight;
|
||||||
|
}
|
||||||
|
}
|
||||||
1284
src/main/java/MaximumIntegerWeightBipartiteMatching.java
Normal file
1284
src/main/java/MaximumIntegerWeightBipartiteMatching.java
Normal file
File diff suppressed because it is too large
Load Diff
212
src/main/java/MaximumWeightBipartiteLookBackAuctionMatching.java
Normal file
212
src/main/java/MaximumWeightBipartiteLookBackAuctionMatching.java
Normal file
@@ -0,0 +1,212 @@
|
|||||||
|
import org.jgrapht.Graph;
|
||||||
|
import org.jgrapht.GraphTests;
|
||||||
|
import org.jgrapht.alg.interfaces.MatchingAlgorithm;
|
||||||
|
import org.jgrapht.alg.util.Pair;
|
||||||
|
|
||||||
|
import java.math.BigDecimal;
|
||||||
|
import java.util.*;
|
||||||
|
|
||||||
|
/*
|
||||||
|
Maximum weight matching in bipartite graphs with strictly integer edge weights, found using the
|
||||||
|
unscaled look-back auction algorithm
|
||||||
|
*/
|
||||||
|
|
||||||
|
public class MaximumWeightBipartiteLookBackAuctionMatching<V, E> implements MatchingAlgorithm<V, E> {
|
||||||
|
|
||||||
|
private final Graph<V, E> graph;
|
||||||
|
private final Set<V> partition1;
|
||||||
|
private final Set<V> partition2;
|
||||||
|
private final BigDecimal delta;
|
||||||
|
private final Set<E> matching;
|
||||||
|
private BigDecimal matchingWeight;
|
||||||
|
private boolean swappedPartitions = false;
|
||||||
|
|
||||||
|
public MaximumWeightBipartiteLookBackAuctionMatching(Graph<V, E> graph, Set<V> partition1, Set<V> partition2) {
|
||||||
|
this.graph = GraphTests.requireUndirected(graph);
|
||||||
|
this.partition1 = Objects.requireNonNull(partition1, "Partition 1 cannot be null");
|
||||||
|
this.partition2 = Objects.requireNonNull(partition2, "Partition 2 cannot be null");
|
||||||
|
int n = Math.max(partition1.size(), partition2.size());
|
||||||
|
this.delta = BigDecimal.valueOf(1 / ((double) n + 1));
|
||||||
|
this.matching = new LinkedHashSet<>();
|
||||||
|
this.matchingWeight = BigDecimal.ZERO;
|
||||||
|
}
|
||||||
|
|
||||||
|
|
||||||
|
/*
|
||||||
|
Method coded using MaximumWeightBipartiteMatching.class from JgraphT as a model
|
||||||
|
*/
|
||||||
|
@Override
|
||||||
|
public Matching<V, E> getMatching() {
|
||||||
|
|
||||||
|
/*
|
||||||
|
* Test input instance
|
||||||
|
*/
|
||||||
|
if (!GraphTests.isSimple(graph)) {
|
||||||
|
throw new IllegalArgumentException("Only simple graphs supported");
|
||||||
|
}
|
||||||
|
if (!GraphTests.isBipartitePartition(graph, partition1, partition2)) {
|
||||||
|
throw new IllegalArgumentException("Graph partition is not bipartite");
|
||||||
|
}
|
||||||
|
|
||||||
|
/*
|
||||||
|
If the two partitions are different sizes, the bidders must be the smaller of the two partitions.
|
||||||
|
*/
|
||||||
|
Set<V> items;
|
||||||
|
Set<V> bidders;
|
||||||
|
if (partition2.size() >= partition1.size()) {
|
||||||
|
bidders = partition1;
|
||||||
|
items = partition2;
|
||||||
|
}
|
||||||
|
else {
|
||||||
|
bidders = partition2;
|
||||||
|
items = partition1;
|
||||||
|
swappedPartitions = true;
|
||||||
|
}
|
||||||
|
|
||||||
|
/*
|
||||||
|
Create a map to track the owner of each item, which is initially null,
|
||||||
|
and a map to track the price of each item, which is initially 0.
|
||||||
|
*/
|
||||||
|
Map<V, V> owners = new HashMap<>();
|
||||||
|
|
||||||
|
/*
|
||||||
|
Create a map to track the prices of the objects
|
||||||
|
*/
|
||||||
|
Map<V, BigDecimal> prices = new HashMap<>();
|
||||||
|
for(V item: items) {
|
||||||
|
owners.put(item, null);
|
||||||
|
prices.put(item, BigDecimal.ZERO);
|
||||||
|
}
|
||||||
|
|
||||||
|
/*
|
||||||
|
Create a map to track the most valuable object for a bidder
|
||||||
|
*/
|
||||||
|
Map<V, V> mostValuableItems = new HashMap<>();
|
||||||
|
|
||||||
|
/*
|
||||||
|
Create a map to track the second most valuable object for a bidder
|
||||||
|
*/
|
||||||
|
Map<V, V> runnerUpItems = new HashMap<>();
|
||||||
|
|
||||||
|
/*
|
||||||
|
Create a map to track the bidder value thresholds
|
||||||
|
*/
|
||||||
|
Map<V, BigDecimal> valueThresholds = new HashMap<>();
|
||||||
|
|
||||||
|
|
||||||
|
//Initialize queue of all bidders that don't currently own an item
|
||||||
|
Queue<V> unmatchedBidders = new ArrayDeque<>();
|
||||||
|
for(V bidder: bidders) {
|
||||||
|
unmatchedBidders.offer(bidder);
|
||||||
|
valueThresholds.put(bidder, BigDecimal.ZERO);
|
||||||
|
mostValuableItems.put(bidder, null);
|
||||||
|
runnerUpItems.put(bidder, null);
|
||||||
|
}
|
||||||
|
|
||||||
|
while (unmatchedBidders.size() > 0) {
|
||||||
|
V bidder = unmatchedBidders.poll();
|
||||||
|
// BigDecimal valueThreshold = valueThresholds.get(bidder);
|
||||||
|
BigDecimal bestValue = BigDecimal.ZERO;
|
||||||
|
BigDecimal runnerUpValue = BigDecimal.ZERO;
|
||||||
|
boolean reinitialize = true;
|
||||||
|
// if (mostValuableItems.get(bidder) != null && runnerUpItems.get(bidder) != null) {
|
||||||
|
// reinitialize = false;
|
||||||
|
// //get the weight of the edge between the bidder and the best valued item
|
||||||
|
// V bestItem = mostValuableItems.get(bidder);
|
||||||
|
// BigDecimal bestItemWeight = BigDecimal.valueOf(graph.getEdgeWeight(graph.getEdge(bidder, bestItem)));
|
||||||
|
// bestValue = bestItemWeight.subtract(prices.get(bestItem));
|
||||||
|
// V runnerUpItem = runnerUpItems.get(bidder);
|
||||||
|
// BigDecimal runnerUpWeight = BigDecimal.valueOf(graph.getEdgeWeight(graph.getEdge(bidder, runnerUpItem)));
|
||||||
|
// runnerUpValue = runnerUpWeight.subtract(prices.get(runnerUpItem));
|
||||||
|
// //if both values are still above the threshold
|
||||||
|
// if (bestValue.compareTo(valueThreshold) >= 0 && runnerUpValue.compareTo(valueThreshold) >= 0) {
|
||||||
|
// if (bestValue.compareTo(runnerUpValue) < 0) { //if best value is lower than runner up
|
||||||
|
// BigDecimal tmp = bestValue;
|
||||||
|
// bestValue = runnerUpValue;
|
||||||
|
// runnerUpValue = tmp;
|
||||||
|
// mostValuableItems.put(bidder, runnerUpItem);
|
||||||
|
// runnerUpItems.put(bidder, bestItem);
|
||||||
|
// }
|
||||||
|
// BigDecimal newValueThreshold = bestValue.min(runnerUpValue);
|
||||||
|
// valueThresholds.put(bidder, newValueThreshold);
|
||||||
|
// System.out.println("lookback successful");
|
||||||
|
// }
|
||||||
|
// else {
|
||||||
|
// reinitialize = true; //lookback failed
|
||||||
|
// }
|
||||||
|
// }
|
||||||
|
if (reinitialize){
|
||||||
|
bestValue = BigDecimal.ZERO;
|
||||||
|
runnerUpValue = BigDecimal.ZERO;
|
||||||
|
for (E edge: graph.edgesOf(bidder)) {
|
||||||
|
double weight = graph.getEdgeWeight(edge);
|
||||||
|
if (weight == 0.0) {
|
||||||
|
continue;
|
||||||
|
}
|
||||||
|
V tmpItem = getItem(bidder, edge);
|
||||||
|
BigDecimal tmpValue = BigDecimal.valueOf(weight).subtract(prices.get(tmpItem));
|
||||||
|
if (tmpValue.compareTo(bestValue) >= 0) {
|
||||||
|
runnerUpValue = bestValue;
|
||||||
|
bestValue = tmpValue;
|
||||||
|
runnerUpItems.put(bidder, mostValuableItems.get(bidder));
|
||||||
|
mostValuableItems.put(bidder, tmpItem);
|
||||||
|
}
|
||||||
|
else if (tmpValue.compareTo(runnerUpValue) >= 0) {
|
||||||
|
runnerUpValue = tmpValue;
|
||||||
|
runnerUpItems.put(bidder, tmpItem);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
valueThresholds.put(bidder, runnerUpValue);
|
||||||
|
}
|
||||||
|
//Should now have initialized the maps to make look back possible
|
||||||
|
//skip this bidder if the best value is still zero
|
||||||
|
if (BigDecimal.ZERO.equals(bestValue)) {
|
||||||
|
continue;
|
||||||
|
}
|
||||||
|
V mostValuableItem = mostValuableItems.get(bidder);
|
||||||
|
BigDecimal price = prices.get(mostValuableItem);
|
||||||
|
BigDecimal bid = price.add(bestValue).subtract(runnerUpValue).add(this.delta);
|
||||||
|
V formerOwner = owners.get(mostValuableItem);
|
||||||
|
if (formerOwner != null) {
|
||||||
|
unmatchedBidders.offer(formerOwner);
|
||||||
|
}
|
||||||
|
owners.put(mostValuableItem, bidder);
|
||||||
|
prices.put(mostValuableItem, bid);
|
||||||
|
}
|
||||||
|
|
||||||
|
for (V item: owners.keySet()) {
|
||||||
|
if (owners.get(item) != null) {
|
||||||
|
matching.add(graph.getEdge(item, owners.get(item)));
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
for(E edge: matching) {
|
||||||
|
this.matchingWeight = this.matchingWeight.add(BigDecimal.valueOf(graph.getEdgeWeight(edge)));
|
||||||
|
}
|
||||||
|
|
||||||
|
|
||||||
|
return new MatchingImpl<>(graph, matching, matchingWeight.doubleValue());
|
||||||
|
}
|
||||||
|
|
||||||
|
private V getItem(V bidder, E edge) {
|
||||||
|
if (swappedPartitions) {
|
||||||
|
return graph.getEdgeSource(edge);
|
||||||
|
}
|
||||||
|
else {
|
||||||
|
return graph.getEdgeTarget(edge);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
private V getBidder(V item, E edge) {
|
||||||
|
if (swappedPartitions) {
|
||||||
|
return graph.getEdgeTarget(edge);
|
||||||
|
}
|
||||||
|
else {
|
||||||
|
return graph.getEdgeSource(edge);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
public BigDecimal getMatchingWeight() {
|
||||||
|
return matchingWeight;
|
||||||
|
}
|
||||||
|
}
|
||||||
@@ -2,10 +2,21 @@
|
|||||||
|
|
||||||
/*
|
/*
|
||||||
TODO: Implement exponential distribution using inversion method - DONE
|
TODO: Implement exponential distribution using inversion method - DONE
|
||||||
|
TODO: Implement collisions with real sequences by having the counting function keep a map of all sequences it's read,
|
||||||
|
with values of all misreads. Can then have a spurious/real collision rate, which will have count randomly select a sequence
|
||||||
|
it's already read at least once, and put that into the list of spurious sequences for the given real sequence. Will let me get rid
|
||||||
|
of the distinctMisreadCount map, and use this new map instead. Doing it this way, once a sequence has been misread as another
|
||||||
|
sequence once, it is more likely to be misread that way again, as future read error collisions can also be real sequence collisions
|
||||||
|
Prob A: a read error occurs. Prob B: it's a new error (otherwise it's a repeated error). Prob C: if new error, prob that it's
|
||||||
|
a real sequence collision (otherwise it's a new spurious sequence) - DONE
|
||||||
TODO: Implement discrete frequency distributions using Vose's Alias Method
|
TODO: Implement discrete frequency distributions using Vose's Alias Method
|
||||||
*/
|
*/
|
||||||
|
|
||||||
|
|
||||||
|
|
||||||
|
import org.apache.commons.rng.sampling.distribution.RejectionInversionZipfSampler;
|
||||||
|
import org.apache.commons.rng.simple.JDKRandomWrapper;
|
||||||
|
|
||||||
import java.util.*;
|
import java.util.*;
|
||||||
|
|
||||||
public class Plate {
|
public class Plate {
|
||||||
@@ -19,25 +30,22 @@ public class Plate {
|
|||||||
private Integer[] populations;
|
private Integer[] populations;
|
||||||
private double stdDev;
|
private double stdDev;
|
||||||
private double lambda;
|
private double lambda;
|
||||||
boolean exponential = false;
|
private double zipfExponent;
|
||||||
|
private DistributionType distributionType;
|
||||||
|
|
||||||
public Plate(CellSample cells, String cellFilename, int numWells, Integer[] populations,
|
public Plate(CellSample cells, String cellFilename, int numWells, Integer[] populations,
|
||||||
double dropoutRate, double stdDev_or_lambda, boolean exponential){
|
double dropoutRate, double parameter){
|
||||||
this.cells = cells;
|
this.cells = cells;
|
||||||
this.sourceFile = cellFilename;
|
this.sourceFile = cellFilename;
|
||||||
this.size = numWells;
|
this.size = numWells;
|
||||||
this.wells = new ArrayList<>();
|
this.wells = new ArrayList<>();
|
||||||
this.error = dropoutRate;
|
this.error = dropoutRate;
|
||||||
this.populations = populations;
|
this.populations = populations;
|
||||||
this.exponential = exponential;
|
this.stdDev = parameter;
|
||||||
if (this.exponential) {
|
this.lambda = parameter;
|
||||||
this.lambda = stdDev_or_lambda;
|
this.zipfExponent = parameter;
|
||||||
fillWellsExponential(cells.getCells(), this.lambda);
|
this.distributionType = BiGpairSEQ.getDistributionType();
|
||||||
}
|
fillWells(cells.getCells());
|
||||||
else {
|
|
||||||
this.stdDev = stdDev_or_lambda;
|
|
||||||
fillWells(cells.getCells(), this.stdDev);
|
|
||||||
}
|
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|
||||||
@@ -54,21 +62,57 @@ public class Plate {
|
|||||||
this.wells = wells;
|
this.wells = wells;
|
||||||
this.size = wells.size();
|
this.size = wells.size();
|
||||||
|
|
||||||
|
double totalCellCount = 0.0;
|
||||||
|
double totalDropoutCount = 0.0;
|
||||||
List<Integer> concentrations = new ArrayList<>();
|
List<Integer> concentrations = new ArrayList<>();
|
||||||
for (List<String[]> w: wells) {
|
for (List<String[]> w: wells) {
|
||||||
if(!concentrations.contains(w.size())){
|
if(!concentrations.contains(w.size())){
|
||||||
concentrations.add(w.size());
|
concentrations.add(w.size());
|
||||||
}
|
}
|
||||||
|
for (String[] cell: w) {
|
||||||
|
totalCellCount += 1.0;
|
||||||
|
for (String sequence: cell) {
|
||||||
|
if("-1".equals(sequence)) {
|
||||||
|
totalDropoutCount += 1.0;
|
||||||
}
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
double totalSequenceCount = totalCellCount * 4;
|
||||||
|
this.error = totalDropoutCount / totalSequenceCount;
|
||||||
this.populations = new Integer[concentrations.size()];
|
this.populations = new Integer[concentrations.size()];
|
||||||
for (int i = 0; i < this.populations.length; i++) {
|
for (int i = 0; i < this.populations.length; i++) {
|
||||||
this.populations[i] = concentrations.get(i);
|
this.populations[i] = concentrations.get(i);
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
private void fillWellsZipf(List<String[]> cells, double exponent) {
|
||||||
|
int numSections = populations.length;
|
||||||
|
int section = 0;
|
||||||
|
int n;
|
||||||
|
RejectionInversionZipfSampler zipfSampler = new RejectionInversionZipfSampler(new JDKRandomWrapper(rand), cells.size(), exponent);
|
||||||
|
while (section < numSections){
|
||||||
|
for (int i = 0; i < (size / numSections); i++) {
|
||||||
|
List<String[]> well = new ArrayList<>();
|
||||||
|
for (int j = 0; j < populations[section]; j++) {
|
||||||
|
do {
|
||||||
|
n = zipfSampler.sample();
|
||||||
|
} while (n >= cells.size() || n < 0);
|
||||||
|
String[] cellToAdd = cells.get(n).clone();
|
||||||
|
for(int k = 0; k < cellToAdd.length; k++){
|
||||||
|
if(Math.abs(rand.nextDouble()) < error){//error applied to each sequence
|
||||||
|
cellToAdd[k] = "-1";
|
||||||
|
}
|
||||||
|
}
|
||||||
|
well.add(cellToAdd);
|
||||||
|
}
|
||||||
|
wells.add(well);
|
||||||
|
}
|
||||||
|
section++;
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
private void fillWellsExponential(List<String[]> cells, double lambda){
|
private void fillWellsExponential(List<String[]> cells, double lambda){
|
||||||
this.lambda = lambda;
|
|
||||||
exponential = true;
|
|
||||||
int numSections = populations.length;
|
int numSections = populations.length;
|
||||||
int section = 0;
|
int section = 0;
|
||||||
double m;
|
double m;
|
||||||
@@ -124,6 +168,24 @@ public class Plate {
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
private void fillWells(List<String[]> cells){
|
||||||
|
DistributionType type = BiGpairSEQ.getDistributionType();
|
||||||
|
switch (type) {
|
||||||
|
case POISSON, GAUSSIAN -> {
|
||||||
|
fillWells(cells, getStdDev());
|
||||||
|
break;
|
||||||
|
}
|
||||||
|
case EXPONENTIAL -> {
|
||||||
|
fillWellsExponential(cells, getLambda());
|
||||||
|
break;
|
||||||
|
}
|
||||||
|
case ZIPF -> {
|
||||||
|
fillWellsZipf(cells, getZipfExponent());
|
||||||
|
break;
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
public Integer[] getPopulations(){
|
public Integer[] getPopulations(){
|
||||||
return populations;
|
return populations;
|
||||||
}
|
}
|
||||||
@@ -136,10 +198,12 @@ public class Plate {
|
|||||||
return stdDev;
|
return stdDev;
|
||||||
}
|
}
|
||||||
|
|
||||||
public boolean isExponential(){return exponential;}
|
public DistributionType getDistributionType() { return distributionType;}
|
||||||
|
|
||||||
public double getLambda(){return lambda;}
|
public double getLambda(){return lambda;}
|
||||||
|
|
||||||
|
public double getZipfExponent(){return zipfExponent;}
|
||||||
|
|
||||||
public double getError() {
|
public double getError() {
|
||||||
return error;
|
return error;
|
||||||
}
|
}
|
||||||
@@ -148,79 +212,83 @@ public class Plate {
|
|||||||
return wells;
|
return wells;
|
||||||
}
|
}
|
||||||
|
|
||||||
// //returns a map of the counts of the sequence at cell index sIndex, in all wells
|
//For the sequences at cell indices sIndices, counts number of unique sequences in all wells.
|
||||||
// public void assayWellsSequenceS(Map<String, Integer> sequences, int... sIndices){
|
//Also simulates sequence read errors with given probabilities.
|
||||||
// this.assayWellsSequenceS(sequences, 0, size, sIndices);
|
//Returns a map of SequenceRecords containing plate data for all sequences read.
|
||||||
// }
|
//TODO actually implement usage of misreadSequences - DONE
|
||||||
//
|
|
||||||
// //returns a map of the counts of the sequence at cell index sIndex, in a specific well
|
|
||||||
// public void assayWellsSequenceS(Map<String, Integer> sequences, int n, int... sIndices) {
|
|
||||||
// this.assayWellsSequenceS(sequences, n, n+1, sIndices);
|
|
||||||
// }
|
|
||||||
//
|
|
||||||
// //returns a map of the counts of the sequence at cell index sIndex, in a range of wells
|
|
||||||
// public void assayWellsSequenceS(Map<String, Integer> sequences, int start, int end, int... sIndices) {
|
|
||||||
// for(int sIndex: sIndices){
|
|
||||||
// for(int i = start; i < end; i++){
|
|
||||||
// countSequences(sequences, wells.get(i), sIndex);
|
|
||||||
// }
|
|
||||||
// }
|
|
||||||
// }
|
|
||||||
// //For the sequences at cell indices sIndices, counts number of unique sequences in the given well into the given map
|
|
||||||
// private void countSequences(Map<String, Integer> wellMap, List<String[]> well, int... sIndices) {
|
|
||||||
// for(String[] cell : well) {
|
|
||||||
// for(int sIndex: sIndices){
|
|
||||||
// //skip dropout sequences, which have value -1
|
|
||||||
// if(!"-1".equals(cell[sIndex])){
|
|
||||||
// wellMap.merge(cell[sIndex], 1, (oldValue, newValue) -> oldValue + newValue);
|
|
||||||
// }
|
|
||||||
// }
|
|
||||||
// }
|
|
||||||
// }
|
|
||||||
|
|
||||||
//For the sequences at cell indices sIndices, counts number of unique sequences in all well into the given map
|
|
||||||
public Map<String, SequenceRecord> countSequences(Integer readDepth, Double readErrorRate,
|
public Map<String, SequenceRecord> countSequences(Integer readDepth, Double readErrorRate,
|
||||||
Double errorCollisionRate, int... sIndices) {
|
Double errorCollisionRate, Double realSequenceCollisionRate, int... sIndices) {
|
||||||
SequenceType[] sequenceTypes = EnumSet.allOf(SequenceType.class).toArray(new SequenceType[0]);
|
SequenceType[] sequenceTypes = EnumSet.allOf(SequenceType.class).toArray(new SequenceType[0]);
|
||||||
Map<String, Integer> distinctMisreadCounts = new HashMap<>();
|
//Map of all real sequences read. Keys are sequences, values are ways sequence has been misread.
|
||||||
|
Map<String, List<String>> sequencesAndMisreads = new HashMap<>();
|
||||||
|
//Map of all sequences read. Keys are sequences, values are associated SequenceRecords
|
||||||
Map<String, SequenceRecord> sequenceMap = new LinkedHashMap<>();
|
Map<String, SequenceRecord> sequenceMap = new LinkedHashMap<>();
|
||||||
|
//get list of all distinct, real sequences
|
||||||
|
String[] realSequences = assayWells(sIndices).toArray(new String[0]);
|
||||||
for (int well = 0; well < size; well++) {
|
for (int well = 0; well < size; well++) {
|
||||||
for (String[] cell: wells.get(well)) {
|
for (String[] cell: wells.get(well)) {
|
||||||
for (int sIndex: sIndices) {
|
for (int sIndex: sIndices) {
|
||||||
|
//the sequence being read
|
||||||
|
String currentSequence = cell[sIndex];
|
||||||
//skip dropout sequences, which have value -1
|
//skip dropout sequences, which have value -1
|
||||||
if (!"-1".equals(cell[sIndex])) {
|
if (!"-1".equals(currentSequence)) {
|
||||||
|
//keep rereading the sequence until the read depth is reached
|
||||||
for (int j = 0; j < readDepth; j++) {
|
for (int j = 0; j < readDepth; j++) {
|
||||||
//Misread sequence
|
//The sequence is misread
|
||||||
if (rand.nextDouble() < readErrorRate) {
|
if (rand.nextDouble() < readErrorRate) {
|
||||||
StringBuilder spurious = new StringBuilder(cell[sIndex]);
|
//The sequence hasn't been read or misread before
|
||||||
//if this sequence hasn't been misread before, or the read error is unique,
|
if (!sequencesAndMisreads.containsKey(currentSequence)) {
|
||||||
//append one more "*" than has been appended before
|
sequencesAndMisreads.put(currentSequence, new ArrayList<>());
|
||||||
if (rand.nextDouble() > errorCollisionRate || !distinctMisreadCounts.containsKey(cell[sIndex])) {
|
}
|
||||||
distinctMisreadCounts.merge(cell[sIndex], 1, (oldValue, newValue) -> oldValue + newValue);
|
//The specific misread hasn't happened before
|
||||||
for (int k = 0; k < distinctMisreadCounts.get(cell[sIndex]); k++) {
|
if (rand.nextDouble() >= errorCollisionRate || sequencesAndMisreads.get(currentSequence).isEmpty()) {
|
||||||
|
//The misread doesn't collide with a real sequence already on the plate and some sequences have already been read
|
||||||
|
if(rand.nextDouble() >= realSequenceCollisionRate || !sequenceMap.isEmpty()){
|
||||||
|
StringBuilder spurious = new StringBuilder(currentSequence);
|
||||||
|
for (int k = 0; k <= sequencesAndMisreads.get(currentSequence).size(); k++) {
|
||||||
spurious.append("*");
|
spurious.append("*");
|
||||||
}
|
}
|
||||||
|
//New sequence record for the spurious sequence
|
||||||
SequenceRecord tmp = new SequenceRecord(spurious.toString(), sequenceTypes[sIndex]);
|
SequenceRecord tmp = new SequenceRecord(spurious.toString(), sequenceTypes[sIndex]);
|
||||||
tmp.addRead(well);
|
tmp.addRead(well);
|
||||||
sequenceMap.put(spurious.toString(), tmp);
|
sequenceMap.put(spurious.toString(), tmp);
|
||||||
|
//add spurious sequence to list of misreads for the real sequence
|
||||||
|
sequencesAndMisreads.get(currentSequence).add(spurious.toString());
|
||||||
}
|
}
|
||||||
//if this is a read error collision, randomly choose a number of "*"s that has been appended before
|
//The misread collides with a real sequence already read from plate
|
||||||
else {
|
else {
|
||||||
int starCount = rand.nextInt(distinctMisreadCounts.get(cell[sIndex]));
|
String wrongSequence;
|
||||||
for (int k = 0; k < starCount; k++) {
|
do{
|
||||||
spurious.append("*");
|
//get a random real sequence that's been read from the plate before
|
||||||
}
|
int index = rand.nextInt(realSequences.length);
|
||||||
sequenceMap.get(spurious.toString()).addRead(well);
|
wrongSequence = realSequences[index];
|
||||||
|
//make sure it's not accidentally the *right* sequence
|
||||||
|
//Also that it's not a wrong sequence already in the misread list
|
||||||
|
} while(currentSequence.equals(wrongSequence) || sequencesAndMisreads.get(currentSequence).contains(wrongSequence));
|
||||||
|
//update the SequenceRecord for wrongSequence
|
||||||
|
sequenceMap.get(wrongSequence).addRead(well);
|
||||||
|
//add wrongSequence to the misreads for currentSequence
|
||||||
|
sequencesAndMisreads.get(currentSequence).add(wrongSequence);
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
//sequence is read correctly
|
}
|
||||||
|
//The sequence is read correctly
|
||||||
else {
|
else {
|
||||||
if (!sequenceMap.containsKey(cell[sIndex])) {
|
//the sequence hasn't been read before
|
||||||
SequenceRecord tmp = new SequenceRecord(cell[sIndex], sequenceTypes[sIndex]);
|
if (!sequenceMap.containsKey(currentSequence)) {
|
||||||
|
//create new record for the sequence
|
||||||
|
SequenceRecord tmp = new SequenceRecord(currentSequence, sequenceTypes[sIndex]);
|
||||||
|
//add this read to the sequence record
|
||||||
tmp.addRead(well);
|
tmp.addRead(well);
|
||||||
sequenceMap.put(cell[sIndex], tmp);
|
//add the sequence and its record to the sequence map
|
||||||
} else {
|
sequenceMap.put(currentSequence, tmp);
|
||||||
sequenceMap.get(cell[sIndex]).addRead(well);
|
//add the sequence to the sequences and misreads map
|
||||||
|
sequencesAndMisreads.put(currentSequence, new ArrayList<>());
|
||||||
|
}
|
||||||
|
//the sequence has been read before
|
||||||
|
else {
|
||||||
|
//get the sequence's record and add this read to it
|
||||||
|
sequenceMap.get(currentSequence).addRead(well);
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
@@ -231,97 +299,17 @@ public class Plate {
|
|||||||
return sequenceMap;
|
return sequenceMap;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
private HashSet<String> assayWells(int[] indices) {
|
||||||
// //returns a map of the counts of the sequence at cell index sIndex, in all wells
|
HashSet<String> allSequences = new HashSet<>();
|
||||||
// //Simulates read depth and read errors, counts the number of reads of a unique sequence into the given map.
|
for (List<String[]> well: wells) {
|
||||||
// public void assayWellsSequenceSWithReadDepth(Map<String, Integer> misreadCounts, Map<String, Integer> occupancyMap, Map<String, Integer> readCountMap,
|
for (String[] cell: well) {
|
||||||
// int readDepth, double readErrorProb, double errorCollisionProb, int... sIndices) {
|
for(int index: indices) {
|
||||||
// this.assayWellsSequenceSWithReadDepth(misreadCounts, occupancyMap, readCountMap, readDepth, readErrorProb, errorCollisionProb, 0, size, sIndices);
|
allSequences.add(cell[index]);
|
||||||
// }
|
}
|
||||||
// //returns a map of the counts of the sequence at cell index sIndex, in a specific of wells
|
}
|
||||||
// //Simulates read depth and read errors, counts the number of reads of a unique sequence into the given map.
|
}
|
||||||
// public void assayWellsSequenceSWithReadDepth(Map<String, Integer> misreadCounts, Map<String, Integer> occupancyMap, Map<String, Integer> readCountMap,
|
return allSequences;
|
||||||
// int readDepth, double readErrorProb, double errorCollisionProb,
|
}
|
||||||
// int n, int... sIndices) {
|
|
||||||
// this.assayWellsSequenceSWithReadDepth(misreadCounts, occupancyMap, readCountMap, readDepth, readErrorProb, errorCollisionProb, n, n+1, sIndices);
|
|
||||||
// }
|
|
||||||
//
|
|
||||||
// //returns a map of the counts of the sequence at cell index sIndex, in a range of wells
|
|
||||||
// //Simulates read depth and read errors, counts the number of reads of a unique sequence into the given map.
|
|
||||||
// public void assayWellsSequenceSWithReadDepth(Map<String, Integer> misreadCounts, Map<String, Integer> occupancyMap, Map<String, Integer> readCountMap,
|
|
||||||
// int readDepth, double readErrorProb, double errorCollisionProb,
|
|
||||||
// int start, int end, int... sIndices) {
|
|
||||||
// for(int sIndex: sIndices){
|
|
||||||
// for(int i = start; i < end; i++){
|
|
||||||
// countSequencesWithReadDepth(misreadCounts, occupancyMap, readCountMap, readDepth, readErrorProb, errorCollisionProb, wells.get(i), sIndex);
|
|
||||||
// }
|
|
||||||
// }
|
|
||||||
// }
|
|
||||||
//
|
|
||||||
// //For the sequences at cell indices sIndices, counts number of unique sequences in the given well into the given map
|
|
||||||
// //Simulates read depth and read errors, counts the number of reads of a unique sequence into the given map.
|
|
||||||
// //NOTE: this function changes the content of the well, adding spurious cells to contain the misread sequences
|
|
||||||
// //(this is necessary because, in the simulation, the plate is read multiple times, but random misreads can only
|
|
||||||
// //be simulated once).
|
|
||||||
// //(Possibly I should refactor all of this to only require a single plate assay, to speed things up. Or at least
|
|
||||||
// //to see if it would speed things up.)
|
|
||||||
// private void countSequencesWithReadDepth(Map<String, Integer> distinctMisreadCounts, Map<String, Integer> occupancyMap, Map<String, Integer> readCountMap,
|
|
||||||
// int readDepth, double readErrorProb, double errorCollisionProb,
|
|
||||||
// List<String[]> well, int... sIndices) {
|
|
||||||
// //list of spurious cells to add to well after counting
|
|
||||||
// List<String[]> spuriousCells = new ArrayList<>();
|
|
||||||
// for(String[] cell : well) {
|
|
||||||
// //new potential spurious cell for each cell that gets read
|
|
||||||
// String[] spuriousCell = new String[SequenceType.values().length];
|
|
||||||
// //initialize spurious cell with all dropout sequences
|
|
||||||
// Arrays.fill(spuriousCell, "-1");
|
|
||||||
// //has a read error occurred?
|
|
||||||
// boolean readError = false;
|
|
||||||
// for(int sIndex: sIndices){
|
|
||||||
// //skip dropout sequences, which have value "-1"
|
|
||||||
// if(!"-1".equals(cell[sIndex])){
|
|
||||||
// Map<String, Integer> sequencesWithReadCounts = new LinkedHashMap<>();
|
|
||||||
// for(int i = 0; i < readDepth; i++) {
|
|
||||||
// if (rand.nextDouble() <= readErrorProb) {
|
|
||||||
// readError = true;
|
|
||||||
// //Read errors are represented by appending "*"s to the end of the sequence some number of times
|
|
||||||
// StringBuilder spurious = new StringBuilder(cell[sIndex]);
|
|
||||||
// //if this sequence hasn't been misread before, or the read error is unique,
|
|
||||||
// //append one more "*" than has been appended before
|
|
||||||
// if (!distinctMisreadCounts.containsKey(cell[sIndex]) || rand.nextDouble() > errorCollisionProb) {
|
|
||||||
// distinctMisreadCounts.merge(cell[sIndex], 1, (oldValue, newValue) -> oldValue + newValue);
|
|
||||||
// for (int j = 0; j < distinctMisreadCounts.get(cell[sIndex]); j++) {
|
|
||||||
// spurious.append("*");
|
|
||||||
// }
|
|
||||||
// }
|
|
||||||
// //if this is a read error collision, randomly choose a number of "*"s that has been appended before
|
|
||||||
// else {
|
|
||||||
// int starCount = rand.nextInt(distinctMisreadCounts.get(cell[sIndex]));
|
|
||||||
// for (int j = 0; j < starCount; j++) {
|
|
||||||
// spurious.append("*");
|
|
||||||
// }
|
|
||||||
// }
|
|
||||||
// sequencesWithReadCounts.merge(spurious.toString(), 1, (oldValue, newValue) -> oldValue + newValue);
|
|
||||||
// //add spurious sequence to spurious cell
|
|
||||||
// spuriousCell[sIndex] = spurious.toString();
|
|
||||||
// }
|
|
||||||
// else {
|
|
||||||
// sequencesWithReadCounts.merge(cell[sIndex], 1, (oldValue, newValue) -> oldValue + newValue);
|
|
||||||
// }
|
|
||||||
// }
|
|
||||||
// for(String seq : sequencesWithReadCounts.keySet()) {
|
|
||||||
// occupancyMap.merge(seq, 1, (oldValue, newValue) -> oldValue + newValue);
|
|
||||||
// readCountMap.merge(seq, sequencesWithReadCounts.get(seq), (oldValue, newValue) -> oldValue + newValue);
|
|
||||||
// }
|
|
||||||
// }
|
|
||||||
// }
|
|
||||||
// if (readError) { //only add a new spurious cell if there was a read error
|
|
||||||
// spuriousCells.add(spuriousCell);
|
|
||||||
// }
|
|
||||||
// }
|
|
||||||
// //add all spurious cells to the well
|
|
||||||
// well.addAll(spuriousCells);
|
|
||||||
// }
|
|
||||||
|
|
||||||
public String getSourceFileName() {
|
public String getSourceFileName() {
|
||||||
return sourceFile;
|
return sourceFile;
|
||||||
|
|||||||
@@ -13,11 +13,13 @@ public class PlateFileWriter {
|
|||||||
private List<List<String[]>> wells;
|
private List<List<String[]>> wells;
|
||||||
private double stdDev;
|
private double stdDev;
|
||||||
private double lambda;
|
private double lambda;
|
||||||
|
private double zipfExponent;
|
||||||
|
private DistributionType distributionType;
|
||||||
private Double error;
|
private Double error;
|
||||||
private String filename;
|
private String filename;
|
||||||
private String sourceFileName;
|
private String sourceFileName;
|
||||||
private Integer[] populations;
|
private Integer[] populations;
|
||||||
private boolean isExponential = false;
|
|
||||||
|
|
||||||
public PlateFileWriter(String filename, Plate plate) {
|
public PlateFileWriter(String filename, Plate plate) {
|
||||||
if(!filename.matches(".*\\.csv")){
|
if(!filename.matches(".*\\.csv")){
|
||||||
@@ -26,12 +28,17 @@ public class PlateFileWriter {
|
|||||||
this.filename = filename;
|
this.filename = filename;
|
||||||
this.sourceFileName = plate.getSourceFileName();
|
this.sourceFileName = plate.getSourceFileName();
|
||||||
this.size = plate.getSize();
|
this.size = plate.getSize();
|
||||||
this.isExponential = plate.isExponential();
|
this.distributionType = plate.getDistributionType();
|
||||||
if(isExponential) {
|
switch(distributionType) {
|
||||||
|
case POISSON, GAUSSIAN -> {
|
||||||
|
this.stdDev = plate.getStdDev();
|
||||||
|
}
|
||||||
|
case EXPONENTIAL -> {
|
||||||
this.lambda = plate.getLambda();
|
this.lambda = plate.getLambda();
|
||||||
}
|
}
|
||||||
else{
|
case ZIPF -> {
|
||||||
this.stdDev = plate.getStdDev();
|
this.zipfExponent = plate.getZipfExponent();
|
||||||
|
}
|
||||||
}
|
}
|
||||||
this.error = plate.getError();
|
this.error = plate.getError();
|
||||||
this.wells = plate.getWells();
|
this.wells = plate.getWells();
|
||||||
@@ -93,13 +100,24 @@ public class PlateFileWriter {
|
|||||||
printer.printComment("Cell source file name: " + sourceFileName);
|
printer.printComment("Cell source file name: " + sourceFileName);
|
||||||
printer.printComment("Each row represents one well on the plate.");
|
printer.printComment("Each row represents one well on the plate.");
|
||||||
printer.printComment("Plate size: " + size);
|
printer.printComment("Plate size: " + size);
|
||||||
printer.printComment("Error rate: " + error);
|
|
||||||
printer.printComment("Well populations: " + wellPopulationsString);
|
printer.printComment("Well populations: " + wellPopulationsString);
|
||||||
if(isExponential){
|
printer.printComment("Error rate: " + error);
|
||||||
printer.printComment("Lambda: " + lambda);
|
switch (distributionType) {
|
||||||
|
case POISSON -> {
|
||||||
|
printer.printComment("Cell frequency distribution: POISSON");
|
||||||
|
}
|
||||||
|
case GAUSSIAN -> {
|
||||||
|
printer.printComment("Cell frequency distribution: GAUSSIAN");
|
||||||
|
printer.printComment("--Standard deviation: " + stdDev);
|
||||||
|
}
|
||||||
|
case EXPONENTIAL -> {
|
||||||
|
printer.printComment("Cell frequency distribution: EXPONENTIAL");
|
||||||
|
printer.printComment("--Lambda: " + lambda);
|
||||||
|
}
|
||||||
|
case ZIPF -> {
|
||||||
|
printer.printComment("Cell frequency distribution: ZIPF");
|
||||||
|
printer.printComment("--Exponent: " + zipfExponent);
|
||||||
}
|
}
|
||||||
else {
|
|
||||||
printer.printComment("Std. dev.: " + stdDev);
|
|
||||||
}
|
}
|
||||||
printer.printRecords(wellsAsStrings);
|
printer.printRecords(wellsAsStrings);
|
||||||
} catch(IOException ex){
|
} catch(IOException ex){
|
||||||
|
|||||||
@@ -39,6 +39,11 @@ public class SequenceRecord implements Serializable {
|
|||||||
wells.put(wellNumber, readCount);
|
wells.put(wellNumber, readCount);
|
||||||
}
|
}
|
||||||
|
|
||||||
|
//Method to remove a well from the occupancy map.
|
||||||
|
//Useful for cases where one sequence is misread as another sequence that isn't actually present in the well
|
||||||
|
//This can reveal itself as an anomalously low read count in that well.
|
||||||
|
public void deleteWell(Integer wellNumber) { wells.remove(wellNumber); }
|
||||||
|
|
||||||
public Set<Integer> getWells() {
|
public Set<Integer> getWells() {
|
||||||
return wells.keySet();
|
return wells.keySet();
|
||||||
}
|
}
|
||||||
|
|||||||
@@ -1,9 +1,8 @@
|
|||||||
|
import org.jgrapht.Graphs;
|
||||||
import org.jgrapht.alg.interfaces.MatchingAlgorithm;
|
import org.jgrapht.alg.interfaces.MatchingAlgorithm;
|
||||||
import org.jgrapht.alg.matching.MaximumWeightBipartiteMatching;
|
import org.jgrapht.alg.matching.MaximumWeightBipartiteMatching;
|
||||||
import org.jgrapht.generate.SimpleWeightedBipartiteGraphMatrixGenerator;
|
|
||||||
import org.jgrapht.graph.DefaultWeightedEdge;
|
import org.jgrapht.graph.DefaultWeightedEdge;
|
||||||
import org.jgrapht.graph.SimpleWeightedGraph;
|
import org.jgrapht.graph.SimpleWeightedGraph;
|
||||||
import org.jheaps.tree.FibonacciHeap;
|
|
||||||
import org.jheaps.tree.PairingHeap;
|
import org.jheaps.tree.PairingHeap;
|
||||||
|
|
||||||
import java.math.BigDecimal;
|
import java.math.BigDecimal;
|
||||||
@@ -12,14 +11,6 @@ import java.text.NumberFormat;
|
|||||||
import java.time.Instant;
|
import java.time.Instant;
|
||||||
import java.time.Duration;
|
import java.time.Duration;
|
||||||
import java.util.*;
|
import java.util.*;
|
||||||
/*
|
|
||||||
Refactor notes
|
|
||||||
What would be necessary to do everything with only one scan through the sample plate?
|
|
||||||
I would need to keep a list of sequences (real and spurious), and metadata about each sequence.
|
|
||||||
I would need the data:
|
|
||||||
* # of each well the sequence appears in
|
|
||||||
* Read count in that well
|
|
||||||
*/
|
|
||||||
|
|
||||||
|
|
||||||
//NOTE: "sequence" in method and variable names refers to a peptide sequence from a simulated T cell
|
//NOTE: "sequence" in method and variable names refers to a peptide sequence from a simulated T cell
|
||||||
@@ -27,7 +18,8 @@ public class Simulator implements GraphModificationFunctions {
|
|||||||
|
|
||||||
|
|
||||||
public static GraphWithMapData makeCDR3Graph(CellSample cellSample, Plate samplePlate, int readDepth,
|
public static GraphWithMapData makeCDR3Graph(CellSample cellSample, Plate samplePlate, int readDepth,
|
||||||
double readErrorRate, double errorCollisionRate, boolean verbose) {
|
double readErrorRate, double errorCollisionRate,
|
||||||
|
double realSequenceCollisionRate, boolean verbose) {
|
||||||
//start timing
|
//start timing
|
||||||
Instant start = Instant.now();
|
Instant start = Instant.now();
|
||||||
int[] alphaIndices = {SequenceType.CDR3_ALPHA.ordinal()};
|
int[] alphaIndices = {SequenceType.CDR3_ALPHA.ordinal()};
|
||||||
@@ -44,11 +36,11 @@ public class Simulator implements GraphModificationFunctions {
|
|||||||
//Make linkedHashMap keyed to sequences, values are SequenceRecords reflecting plate statistics
|
//Make linkedHashMap keyed to sequences, values are SequenceRecords reflecting plate statistics
|
||||||
if(verbose){System.out.println("Making sample plate sequence maps");}
|
if(verbose){System.out.println("Making sample plate sequence maps");}
|
||||||
Map<String, SequenceRecord> alphaSequences = samplePlate.countSequences(readDepth, readErrorRate,
|
Map<String, SequenceRecord> alphaSequences = samplePlate.countSequences(readDepth, readErrorRate,
|
||||||
errorCollisionRate, alphaIndices);
|
errorCollisionRate, realSequenceCollisionRate, alphaIndices);
|
||||||
int alphaCount = alphaSequences.size();
|
int alphaCount = alphaSequences.size();
|
||||||
if(verbose){System.out.println("Alphas sequences read: " + alphaCount);}
|
if(verbose){System.out.println("Alphas sequences read: " + alphaCount);}
|
||||||
Map<String, SequenceRecord> betaSequences = samplePlate.countSequences(readDepth, readErrorRate,
|
Map<String, SequenceRecord> betaSequences = samplePlate.countSequences(readDepth, readErrorRate,
|
||||||
errorCollisionRate, betaIndices);
|
errorCollisionRate, realSequenceCollisionRate, betaIndices);
|
||||||
int betaCount = betaSequences.size();
|
int betaCount = betaSequences.size();
|
||||||
if(verbose){System.out.println("Betas sequences read: " + betaCount);}
|
if(verbose){System.out.println("Betas sequences read: " + betaCount);}
|
||||||
if(verbose){System.out.println("Sample plate sequence maps made");}
|
if(verbose){System.out.println("Sample plate sequence maps made");}
|
||||||
@@ -68,72 +60,126 @@ public class Simulator implements GraphModificationFunctions {
|
|||||||
if(verbose){System.out.println("Remaining alpha sequence count: " + alphaSequences.size());}
|
if(verbose){System.out.println("Remaining alpha sequence count: " + alphaSequences.size());}
|
||||||
if(verbose){System.out.println("Remaining beta sequence count: " + betaSequences.size());}
|
if(verbose){System.out.println("Remaining beta sequence count: " + betaSequences.size());}
|
||||||
}
|
}
|
||||||
int pairableAlphaCount = alphaSequences.size();
|
if (realSequenceCollisionRate > 0.0) {
|
||||||
if(verbose){System.out.println("Remaining alpha sequence count: " + pairableAlphaCount);}
|
if(verbose){System.out.println("Removing wells with anomalous read counts from sequence records");}
|
||||||
int pairableBetaCount = betaSequences.size();
|
int alphaWellsRemoved = filterWellsByReadCount(alphaSequences);
|
||||||
if(verbose){System.out.println("Remaining beta sequence count: " + pairableBetaCount);}
|
int betaWellsRemoved = filterWellsByReadCount(betaSequences);
|
||||||
|
if(verbose){System.out.println("Wells with anomalous read counts removed from sequence records");}
|
||||||
|
if(verbose){System.out.println("Total alpha sequence wells removed: " + alphaWellsRemoved);}
|
||||||
|
if(verbose){System.out.println("Total beta sequence wells removed: " + betaWellsRemoved);}
|
||||||
|
}
|
||||||
|
|
||||||
|
/*
|
||||||
|
* The commented out code below works beautifully for small enough graphs. However, after implementing a
|
||||||
|
* Zipf distribution and attempting to simulate Experiment 3 from the paper again, I discovered that
|
||||||
|
* this method uses too much memory. Even a 120GB heap is not enough to build this adjacency matrix.
|
||||||
|
* So I'm going to attempt to build this graph directly and see if that is less memory intensive
|
||||||
|
*/
|
||||||
|
// //construct the graph. For simplicity, going to make
|
||||||
|
// if(verbose){System.out.println("Making vertex maps");}
|
||||||
|
// //For the SimpleWeightedBipartiteGraphMatrixGenerator, all vertices must have
|
||||||
|
// //distinct numbers associated with them. Since I'm using a 2D array, that means
|
||||||
|
// //distinct indices between the rows and columns. vertexStartValue lets me track where I switch
|
||||||
|
// //from numbering rows to columns, so I can assign unique numbers to every vertex, and then
|
||||||
|
// //subtract the vertexStartValue from betas to use their vertex labels as array indices
|
||||||
|
// int vertexStartValue = 0;
|
||||||
|
// //keys are sequential integer vertices, values are alphas
|
||||||
|
// Map<String, Integer> plateAtoVMap = makeSequenceToVertexMap(alphaSequences, vertexStartValue);
|
||||||
|
// //new start value for vertex to beta map should be one more than final vertex value in alpha map
|
||||||
|
// vertexStartValue += plateAtoVMap.size();
|
||||||
|
// //keys are betas, values are sequential integers
|
||||||
|
// Map<String, Integer> plateBtoVMap = makeSequenceToVertexMap(betaSequences, vertexStartValue);
|
||||||
|
// if(verbose){System.out.println("Vertex maps made");}
|
||||||
|
// //make adjacency matrix for bipartite graph generator
|
||||||
|
// //(technically this is only 1/4 of an adjacency matrix, but that's all you need
|
||||||
|
// //for a bipartite graph, and all the SimpleWeightedBipartiteGraphMatrixGenerator class expects.)
|
||||||
|
// if(verbose){System.out.println("Making adjacency matrix");}
|
||||||
|
// double[][] weights = new double[plateAtoVMap.size()][plateBtoVMap.size()];
|
||||||
|
// fillAdjacencyMatrix(weights, vertexStartValue, alphaSequences, betaSequences, plateAtoVMap, plateBtoVMap);
|
||||||
|
// if(verbose){System.out.println("Adjacency matrix made");}
|
||||||
|
// //make bipartite graph
|
||||||
|
// if(verbose){System.out.println("Making bipartite weighted graph");}
|
||||||
|
// //the graph object
|
||||||
|
// SimpleWeightedGraph<Vertex, DefaultWeightedEdge> graph =
|
||||||
|
// new SimpleWeightedGraph<>(DefaultWeightedEdge.class);
|
||||||
|
// //the graph generator
|
||||||
|
// SimpleWeightedBipartiteGraphMatrixGenerator graphGenerator = new SimpleWeightedBipartiteGraphMatrixGenerator();
|
||||||
|
// //the list of alpha vertices
|
||||||
|
// List<Vertex> alphaVertices = new ArrayList<>();
|
||||||
|
// for (String seq : plateAtoVMap.keySet()) {
|
||||||
|
// Vertex alphaVertex = new Vertex(alphaSequences.get(seq), plateAtoVMap.get(seq));
|
||||||
|
// alphaVertices.add(alphaVertex);
|
||||||
|
// }
|
||||||
|
// //Sort to make sure the order of vertices in list matches the order of the adjacency matrix
|
||||||
|
// Collections.sort(alphaVertices);
|
||||||
|
// //Add ordered list of vertices to the graph
|
||||||
|
// graphGenerator.first(alphaVertices);
|
||||||
|
// //the list of beta vertices
|
||||||
|
// List<Vertex> betaVertices = new ArrayList<>();
|
||||||
|
// for (String seq : plateBtoVMap.keySet()) {
|
||||||
|
// Vertex betaVertex = new Vertex(betaSequences.get(seq), plateBtoVMap.get(seq));
|
||||||
|
// betaVertices.add(betaVertex);
|
||||||
|
// }
|
||||||
|
// //Sort to make sure the order of vertices in list matches the order of the adjacency matrix
|
||||||
|
// Collections.sort(betaVertices);
|
||||||
|
// //Add ordered list of vertices to the graph
|
||||||
|
// graphGenerator.second(betaVertices);
|
||||||
|
// //use adjacency matrix of weight created previously
|
||||||
|
// graphGenerator.weights(weights);
|
||||||
|
// graphGenerator.generateGraph(graph);
|
||||||
|
|
||||||
//construct the graph. For simplicity, going to make
|
|
||||||
if(verbose){System.out.println("Making vertex maps");}
|
|
||||||
//For the SimpleWeightedBipartiteGraphMatrixGenerator, all vertices must have
|
|
||||||
//distinct numbers associated with them. Since I'm using a 2D array, that means
|
|
||||||
//distinct indices between the rows and columns. vertexStartValue lets me track where I switch
|
|
||||||
//from numbering rows to columns, so I can assign unique numbers to every vertex, and then
|
|
||||||
//subtract the vertexStartValue from betas to use their vertex labels as array indices
|
|
||||||
int vertexStartValue = 0;
|
|
||||||
//keys are sequential integer vertices, values are alphas
|
|
||||||
Map<String, Integer> plateAtoVMap = makeSequenceToVertexMap(alphaSequences, vertexStartValue);
|
|
||||||
//new start value for vertex to beta map should be one more than final vertex value in alpha map
|
|
||||||
vertexStartValue += plateAtoVMap.size();
|
|
||||||
//keys are betas, values are sequential integers
|
|
||||||
Map<String, Integer> plateBtoVMap = makeSequenceToVertexMap(betaSequences, vertexStartValue);
|
|
||||||
if(verbose){System.out.println("Vertex maps made");}
|
|
||||||
//make adjacency matrix for bipartite graph generator
|
|
||||||
//(technically this is only 1/4 of an adjacency matrix, but that's all you need
|
|
||||||
//for a bipartite graph, and all the SimpleWeightedBipartiteGraphMatrixGenerator class expects.)
|
|
||||||
if(verbose){System.out.println("Making adjacency matrix");}
|
|
||||||
double[][] weights = new double[plateAtoVMap.size()][plateBtoVMap.size()];
|
|
||||||
fillAdjacencyMatrix(weights, vertexStartValue, alphaSequences, betaSequences, plateAtoVMap, plateBtoVMap);
|
|
||||||
if(verbose){System.out.println("Adjacency matrix made");}
|
|
||||||
//make bipartite graph
|
//make bipartite graph
|
||||||
if(verbose){System.out.println("Making bipartite weighted graph");}
|
if(verbose){System.out.println("Making bipartite weighted graph");}
|
||||||
//the graph object
|
//the graph object
|
||||||
SimpleWeightedGraph<Vertex, DefaultWeightedEdge> graph =
|
SimpleWeightedGraph<Vertex, DefaultWeightedEdge> graph =
|
||||||
new SimpleWeightedGraph<>(DefaultWeightedEdge.class);
|
new SimpleWeightedGraph<>(DefaultWeightedEdge.class);
|
||||||
//the graph generator
|
int vertexLabelValue = 0;
|
||||||
SimpleWeightedBipartiteGraphMatrixGenerator graphGenerator = new SimpleWeightedBipartiteGraphMatrixGenerator();
|
//create and add alpha sequence vertices
|
||||||
//the list of alpha vertices
|
|
||||||
List<Vertex> alphaVertices = new ArrayList<>();
|
List<Vertex> alphaVertices = new ArrayList<>();
|
||||||
for (String seq : plateAtoVMap.keySet()) {
|
for (Map.Entry<String, SequenceRecord> entry: alphaSequences.entrySet()) {
|
||||||
Vertex alphaVertex = new Vertex(alphaSequences.get(seq), plateAtoVMap.get(seq));
|
alphaVertices.add(new Vertex(entry.getValue(), vertexLabelValue));
|
||||||
alphaVertices.add(alphaVertex);
|
vertexLabelValue++;
|
||||||
}
|
}
|
||||||
//Sort to make sure the order of vertices in list matches the order of the adjacency matrix
|
alphaVertices.forEach(graph::addVertex);
|
||||||
Collections.sort(alphaVertices);
|
//add beta sequence vertices
|
||||||
//Add ordered list of vertices to the graph
|
|
||||||
graphGenerator.first(alphaVertices);
|
|
||||||
//the list of beta vertices
|
|
||||||
List<Vertex> betaVertices = new ArrayList<>();
|
List<Vertex> betaVertices = new ArrayList<>();
|
||||||
for (String seq : plateBtoVMap.keySet()) {
|
for (Map.Entry<String, SequenceRecord> entry: betaSequences.entrySet()) {
|
||||||
Vertex betaVertex = new Vertex(betaSequences.get(seq), plateBtoVMap.get(seq));
|
betaVertices.add(new Vertex(entry.getValue(), vertexLabelValue));
|
||||||
betaVertices.add(betaVertex);
|
vertexLabelValue++;
|
||||||
|
}
|
||||||
|
betaVertices.forEach(graph::addVertex);
|
||||||
|
//add edges (best so far)
|
||||||
|
int edgesAddedCount = 0;
|
||||||
|
for(Vertex a: alphaVertices) {
|
||||||
|
Set<Integer> a_wells = a.getRecord().getWells();
|
||||||
|
for(Vertex b: betaVertices) {
|
||||||
|
Set<Integer> sharedWells = new HashSet<>(a_wells);
|
||||||
|
sharedWells.retainAll(b.getRecord().getWells());
|
||||||
|
if (!sharedWells.isEmpty()) {
|
||||||
|
Graphs.addEdge(graph, a, b, (double) sharedWells.size());
|
||||||
|
}
|
||||||
|
edgesAddedCount++;
|
||||||
|
if (edgesAddedCount % 10000000 == 0) { //collect garbage every 10,000,000 edges
|
||||||
|
System.out.println(edgesAddedCount + " edges added");
|
||||||
|
//request garbage collection
|
||||||
|
System.gc();
|
||||||
|
System.out.println("Garbage collection requested");
|
||||||
|
}
|
||||||
|
}
|
||||||
}
|
}
|
||||||
//Sort to make sure the order of vertices in list matches the order of the adjacency matrix
|
|
||||||
Collections.sort(betaVertices);
|
|
||||||
//Add ordered list of vertices to the graph
|
|
||||||
graphGenerator.second(betaVertices);
|
|
||||||
//use adjacency matrix of weight created previously
|
|
||||||
graphGenerator.weights(weights);
|
|
||||||
graphGenerator.generateGraph(graph);
|
|
||||||
if(verbose){System.out.println("Graph created");}
|
if(verbose){System.out.println("Graph created");}
|
||||||
//stop timing
|
//stop timing
|
||||||
Instant stop = Instant.now();
|
Instant stop = Instant.now();
|
||||||
Duration time = Duration.between(start, stop);
|
Duration time = Duration.between(start, stop);
|
||||||
//create GraphWithMapData object
|
//create GraphWithMapData object
|
||||||
GraphWithMapData output = new GraphWithMapData(graph, numWells, samplePlate.getPopulations(), distCellsMapAlphaKey,
|
GraphWithMapData output = new GraphWithMapData(graph, numWells, samplePlate.getPopulations(), distCellsMapAlphaKey,
|
||||||
alphaCount, betaCount, readDepth, readErrorRate, errorCollisionRate, time);
|
alphaCount, betaCount, samplePlate.getError(), readDepth, readErrorRate, errorCollisionRate, realSequenceCollisionRate, time);
|
||||||
//Set source file name in graph to name of sample plate
|
//Set cell sample file name in graph to name of cell sample
|
||||||
output.setSourceFilename(samplePlate.getFilename());
|
output.setCellFilename(cellSample.getFilename());
|
||||||
|
//Set cell sample size in graph
|
||||||
|
output.setCellSampleSize(cellSample.getCellCount());
|
||||||
|
//Set sample plate file name in graph to name of sample plate
|
||||||
|
output.setPlateFilename(samplePlate.getFilename());
|
||||||
//return GraphWithMapData object
|
//return GraphWithMapData object
|
||||||
return output;
|
return output;
|
||||||
}
|
}
|
||||||
@@ -141,10 +187,10 @@ public class Simulator implements GraphModificationFunctions {
|
|||||||
//match CDR3s.
|
//match CDR3s.
|
||||||
public static MatchingResult matchCDR3s(GraphWithMapData data, String dataFilename, Integer lowThreshold,
|
public static MatchingResult matchCDR3s(GraphWithMapData data, String dataFilename, Integer lowThreshold,
|
||||||
Integer highThreshold, Integer maxOccupancyDifference,
|
Integer highThreshold, Integer maxOccupancyDifference,
|
||||||
Integer minOverlapPercent, boolean verbose) {
|
Integer minOverlapPercent, boolean verbose, boolean calculatePValue) {
|
||||||
Instant start = Instant.now();
|
Instant start = Instant.now();
|
||||||
SimpleWeightedGraph<Vertex, DefaultWeightedEdge> graph = data.getGraph();
|
SimpleWeightedGraph<Vertex, DefaultWeightedEdge> graph = data.getGraph();
|
||||||
Map<Vertex[], Integer> removedEdges = new HashMap<>();
|
Map<DefaultWeightedEdge, Vertex[]> removedEdges = new HashMap<>();
|
||||||
boolean saveEdges = BiGpairSEQ.cacheGraph();
|
boolean saveEdges = BiGpairSEQ.cacheGraph();
|
||||||
int numWells = data.getNumWells();
|
int numWells = data.getNumWells();
|
||||||
//Integer alphaCount = data.getAlphaCount();
|
//Integer alphaCount = data.getAlphaCount();
|
||||||
@@ -162,6 +208,7 @@ public class Simulator implements GraphModificationFunctions {
|
|||||||
}
|
}
|
||||||
Integer graphAlphaCount = alphas.size();
|
Integer graphAlphaCount = alphas.size();
|
||||||
Integer graphBetaCount = betas.size();
|
Integer graphBetaCount = betas.size();
|
||||||
|
Integer graphEdgeCount = graph.edgeSet().size();
|
||||||
|
|
||||||
//remove edges with weights outside given overlap thresholds, add those to removed edge list
|
//remove edges with weights outside given overlap thresholds, add those to removed edge list
|
||||||
if(verbose){System.out.println("Eliminating edges with weights outside overlap threshold values");}
|
if(verbose){System.out.println("Eliminating edges with weights outside overlap threshold values");}
|
||||||
@@ -181,33 +228,39 @@ public class Simulator implements GraphModificationFunctions {
|
|||||||
if(verbose){System.out.println("Edges between vertices of with excessively different occupancy values " +
|
if(verbose){System.out.println("Edges between vertices of with excessively different occupancy values " +
|
||||||
"removed");}
|
"removed");}
|
||||||
|
|
||||||
|
Integer filteredGraphEdgeCount = graph.edgeSet().size();
|
||||||
|
|
||||||
//Find Maximum Weight Matching
|
//Find Maximum Weight Matching
|
||||||
//using jheaps library class PairingHeap for improved efficiency
|
|
||||||
if(verbose){System.out.println("Finding maximum weight matching");}
|
if(verbose){System.out.println("Finding maximum weight matching");}
|
||||||
MaximumWeightBipartiteMatching maxWeightMatching;
|
//The matching object
|
||||||
//Use correct heap type for priority queue
|
MatchingAlgorithm<Vertex, DefaultWeightedEdge> maxWeightMatching;
|
||||||
String heapType = BiGpairSEQ.getPriorityQueueHeapType();
|
//Determine algorithm type
|
||||||
switch (heapType) {
|
AlgorithmType algorithm = BiGpairSEQ.getMatchingAlgorithmType();
|
||||||
case "PAIRING" -> {
|
switch (algorithm) { //Only two options now, but I have room to add more algorithms in the future this way
|
||||||
maxWeightMatching = new MaximumWeightBipartiteMatching(graph,
|
case AUCTION -> {
|
||||||
|
//create a new MaximumIntegerWeightBipartiteAuctionMatching
|
||||||
|
maxWeightMatching = new MaximumIntegerWeightBipartiteAuctionMatching<>(graph, alphas, betas);
|
||||||
|
}
|
||||||
|
case INTEGER_WEIGHT_SCALING -> {
|
||||||
|
maxWeightMatching = new MaximumIntegerWeightBipartiteMatching<>(graph, alphas, betas, new BigDecimal(highThreshold));
|
||||||
|
}
|
||||||
|
default -> { //HUNGARIAN
|
||||||
|
//use selected heap type for priority queue
|
||||||
|
HeapType heap = BiGpairSEQ.getPriorityQueueHeapType();
|
||||||
|
if(HeapType.PAIRING.equals(heap)) {
|
||||||
|
maxWeightMatching = new MaximumWeightBipartiteMatching<Vertex, DefaultWeightedEdge>(graph,
|
||||||
alphas,
|
alphas,
|
||||||
betas,
|
betas,
|
||||||
i -> new PairingHeap(Comparator.naturalOrder()));
|
i -> new PairingHeap(Comparator.naturalOrder()));
|
||||||
}
|
}
|
||||||
case "FIBONACCI" -> {
|
else {//Fibonacci is the default, and what's used in the JGraphT implementation
|
||||||
maxWeightMatching = new MaximumWeightBipartiteMatching(graph,
|
maxWeightMatching = new MaximumWeightBipartiteMatching<Vertex, DefaultWeightedEdge>(graph,
|
||||||
alphas,
|
|
||||||
betas,
|
|
||||||
i -> new FibonacciHeap(Comparator.naturalOrder()));
|
|
||||||
}
|
|
||||||
default -> {
|
|
||||||
maxWeightMatching = new MaximumWeightBipartiteMatching(graph,
|
|
||||||
alphas,
|
alphas,
|
||||||
betas);
|
betas);
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
//get the matching
|
}
|
||||||
MatchingAlgorithm.Matching<String, DefaultWeightedEdge> graphMatching = maxWeightMatching.getMatching();
|
MatchingAlgorithm.Matching<Vertex, DefaultWeightedEdge> matching = maxWeightMatching.getMatching();
|
||||||
if(verbose){System.out.println("Matching completed");}
|
if(verbose){System.out.println("Matching completed");}
|
||||||
Instant stop = Instant.now();
|
Instant stop = Instant.now();
|
||||||
|
|
||||||
@@ -219,13 +272,13 @@ public class Simulator implements GraphModificationFunctions {
|
|||||||
header.add("Beta well count");
|
header.add("Beta well count");
|
||||||
header.add("Overlap well count");
|
header.add("Overlap well count");
|
||||||
header.add("Matched correctly?");
|
header.add("Matched correctly?");
|
||||||
header.add("P-value");
|
if(calculatePValue) { header.add("P-value"); }
|
||||||
|
|
||||||
//Results for csv file
|
//Results for csv file
|
||||||
List<List<String>> allResults = new ArrayList<>();
|
List<List<String>> allResults = new ArrayList<>();
|
||||||
NumberFormat nf = NumberFormat.getInstance(Locale.US);
|
NumberFormat nf = NumberFormat.getInstance(Locale.US);
|
||||||
MathContext mc = new MathContext(3);
|
MathContext mc = new MathContext(3);
|
||||||
Iterator<DefaultWeightedEdge> weightIter = graphMatching.iterator();
|
Iterator<DefaultWeightedEdge> weightIter = matching.iterator();
|
||||||
DefaultWeightedEdge e;
|
DefaultWeightedEdge e;
|
||||||
int trueCount = 0;
|
int trueCount = 0;
|
||||||
int falseCount = 0;
|
int falseCount = 0;
|
||||||
@@ -256,18 +309,32 @@ public class Simulator implements GraphModificationFunctions {
|
|||||||
//overlap count
|
//overlap count
|
||||||
result.add(Double.toString(graph.getEdgeWeight(e)));
|
result.add(Double.toString(graph.getEdgeWeight(e)));
|
||||||
result.add(Boolean.toString(check));
|
result.add(Boolean.toString(check));
|
||||||
|
if (calculatePValue) {
|
||||||
double pValue = Equations.pValue(numWells, source.getOccupancy(),
|
double pValue = Equations.pValue(numWells, source.getOccupancy(),
|
||||||
target.getOccupancy(), graph.getEdgeWeight(e));
|
target.getOccupancy(), graph.getEdgeWeight(e));
|
||||||
BigDecimal pValueTrunc = new BigDecimal(pValue, mc);
|
BigDecimal pValueTrunc = new BigDecimal(pValue, mc);
|
||||||
result.add(pValueTrunc.toString());
|
result.add(pValueTrunc.toString());
|
||||||
|
}
|
||||||
allResults.add(result);
|
allResults.add(result);
|
||||||
}
|
}
|
||||||
|
|
||||||
//Metadata comments for CSV file
|
//Metadata comments for CSV file
|
||||||
String algoType = "LEDA book with heap: " + heapType;
|
String algoType;
|
||||||
|
switch(algorithm) {
|
||||||
|
case AUCTION -> {
|
||||||
|
algoType = "Auction algorithm";
|
||||||
|
}
|
||||||
|
case INTEGER_WEIGHT_SCALING -> {
|
||||||
|
algoType = "Integer weight scaling algorithm from Duan and Su (not yet perfectly implemented)";
|
||||||
|
}
|
||||||
|
default -> { //HUNGARIAN
|
||||||
|
algoType = "Hungarian algorithm with heap: " + BiGpairSEQ.getPriorityQueueHeapType().name();
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
int min = Math.min(graphAlphaCount, graphBetaCount);
|
int min = Math.min(graphAlphaCount, graphBetaCount);
|
||||||
//matching weight
|
//matching weight
|
||||||
BigDecimal totalMatchingWeight = maxWeightMatching.getMatchingWeight();
|
Double matchingWeight = matching.getWeight();
|
||||||
//rate of attempted matching
|
//rate of attempted matching
|
||||||
double attemptRate = (double) (trueCount + falseCount) / min;
|
double attemptRate = (double) (trueCount + falseCount) / min;
|
||||||
BigDecimal attemptRateTrunc = new BigDecimal(attemptRate, mc);
|
BigDecimal attemptRateTrunc = new BigDecimal(attemptRate, mc);
|
||||||
@@ -299,22 +366,25 @@ public class Simulator implements GraphModificationFunctions {
|
|||||||
|
|
||||||
|
|
||||||
Map<String, String> metadata = new LinkedHashMap<>();
|
Map<String, String> metadata = new LinkedHashMap<>();
|
||||||
metadata.put("sample plate filename", data.getSourceFilename());
|
metadata.put("cell sample filename", data.getCellFilename());
|
||||||
|
metadata.put("cell sample size", data.getCellSampleSize().toString());
|
||||||
|
metadata.put("sample plate filename", data.getPlateFilename());
|
||||||
|
metadata.put("sample plate well count", data.getNumWells().toString());
|
||||||
|
metadata.put("sequence dropout rate", data.getDropoutRate().toString());
|
||||||
metadata.put("graph filename", dataFilename);
|
metadata.put("graph filename", dataFilename);
|
||||||
metadata.put("MWM algorithm type", algoType);
|
metadata.put("MWM algorithm type", algoType);
|
||||||
metadata.put("matching weight", totalMatchingWeight.toString());
|
metadata.put("matching weight", matchingWeight.toString());
|
||||||
metadata.put("well populations", wellPopulationsString);
|
metadata.put("well populations", wellPopulationsString);
|
||||||
metadata.put("sequence read depth", data.getReadDepth().toString());
|
metadata.put("sequence read depth", data.getReadDepth().toString());
|
||||||
metadata.put("sequence read error rate", data.getReadErrorRate().toString());
|
metadata.put("sequence read error rate", data.getReadErrorRate().toString());
|
||||||
metadata.put("read error collision rate", data.getErrorCollisionRate().toString());
|
metadata.put("read error collision rate", data.getErrorCollisionRate().toString());
|
||||||
|
metadata.put("real sequence collision rate", data.getRealSequenceCollisionRate().toString());
|
||||||
metadata.put("total alphas read from plate", data.getAlphaCount().toString());
|
metadata.put("total alphas read from plate", data.getAlphaCount().toString());
|
||||||
metadata.put("total betas read from plate", data.getBetaCount().toString());
|
metadata.put("total betas read from plate", data.getBetaCount().toString());
|
||||||
//HARD CODED, PARAMETERIZE LATER
|
metadata.put("initial edges in graph", graphEdgeCount.toString());
|
||||||
metadata.put("pre-filter sequences present in all wells", "true");
|
|
||||||
//HARD CODED, PARAMETERIZE LATER
|
|
||||||
metadata.put("pre-filter sequences based on occupancy/read count discrepancy", "true");
|
|
||||||
metadata.put("alphas in graph (after pre-filtering)", graphAlphaCount.toString());
|
metadata.put("alphas in graph (after pre-filtering)", graphAlphaCount.toString());
|
||||||
metadata.put("betas in graph (after pre-filtering)", graphBetaCount.toString());
|
metadata.put("betas in graph (after pre-filtering)", graphBetaCount.toString());
|
||||||
|
metadata.put("final edges in graph (after pre-filtering)", filteredGraphEdgeCount.toString());
|
||||||
metadata.put("high overlap threshold for pairing", highThreshold.toString());
|
metadata.put("high overlap threshold for pairing", highThreshold.toString());
|
||||||
metadata.put("low overlap threshold for pairing", lowThreshold.toString());
|
metadata.put("low overlap threshold for pairing", lowThreshold.toString());
|
||||||
metadata.put("minimum overlap percent for pairing", minOverlapPercent.toString());
|
metadata.put("minimum overlap percent for pairing", minOverlapPercent.toString());
|
||||||
@@ -343,6 +413,7 @@ public class Simulator implements GraphModificationFunctions {
|
|||||||
return output;
|
return output;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|
||||||
//Commented out CDR1 matching until it's time to re-implement it
|
//Commented out CDR1 matching until it's time to re-implement it
|
||||||
// //Simulated matching of CDR1s to CDR3s. Requires MatchingResult from prior run of matchCDR3s.
|
// //Simulated matching of CDR1s to CDR3s. Requires MatchingResult from prior run of matchCDR3s.
|
||||||
// public static MatchingResult[] matchCDR1s(List<Integer[]> distinctCells,
|
// public static MatchingResult[] matchCDR1s(List<Integer[]> distinctCells,
|
||||||
@@ -649,7 +720,7 @@ public class Simulator implements GraphModificationFunctions {
|
|||||||
// }
|
// }
|
||||||
|
|
||||||
//Remove sequences based on occupancy
|
//Remove sequences based on occupancy
|
||||||
public static void filterByOccupancyThresholds(Map<String, SequenceRecord> wellMap, int low, int high){
|
private static void filterByOccupancyThresholds(Map<String, SequenceRecord> wellMap, int low, int high){
|
||||||
List<String> noise = new ArrayList<>();
|
List<String> noise = new ArrayList<>();
|
||||||
for(String k: wellMap.keySet()){
|
for(String k: wellMap.keySet()){
|
||||||
if((wellMap.get(k).getOccupancy() > high) || (wellMap.get(k).getOccupancy() < low)){
|
if((wellMap.get(k).getOccupancy() > high) || (wellMap.get(k).getOccupancy() < low)){
|
||||||
@@ -661,10 +732,10 @@ public class Simulator implements GraphModificationFunctions {
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
public static void filterByOccupancyAndReadCount(Map<String, SequenceRecord> sequences, int readDepth) {
|
private static void filterByOccupancyAndReadCount(Map<String, SequenceRecord> sequences, int readDepth) {
|
||||||
List<String> noise = new ArrayList<>();
|
List<String> noise = new ArrayList<>();
|
||||||
for(String k : sequences.keySet()){
|
for(String k : sequences.keySet()){
|
||||||
//occupancy times read depth should be more than half the sequence read count if the read error rate is low
|
//the sequence read count should be more than half the occupancy times read depth if the read error rate is low
|
||||||
Integer threshold = (sequences.get(k).getOccupancy() * readDepth) / 2;
|
Integer threshold = (sequences.get(k).getOccupancy() * readDepth) / 2;
|
||||||
if(sequences.get(k).getReadCount() < threshold) {
|
if(sequences.get(k).getReadCount() < threshold) {
|
||||||
noise.add(k);
|
noise.add(k);
|
||||||
@@ -675,6 +746,26 @@ public class Simulator implements GraphModificationFunctions {
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
private static int filterWellsByReadCount(Map<String, SequenceRecord> sequences) {
|
||||||
|
int count = 0;
|
||||||
|
for (String k: sequences.keySet()) {
|
||||||
|
//If a sequence has read count R and appears in W wells, then on average its read count in each
|
||||||
|
//well should be R/W. Delete any wells where the read count is less than R/2W.
|
||||||
|
Integer threshold = sequences.get(k).getReadCount() / (2 * sequences.get(k).getOccupancy());
|
||||||
|
List<Integer> noise = new ArrayList<>();
|
||||||
|
for (Integer well: sequences.get(k).getWells()) {
|
||||||
|
if (sequences.get(k).getReadCount(well) < threshold) {
|
||||||
|
noise.add(well);
|
||||||
|
count++;
|
||||||
|
}
|
||||||
|
}
|
||||||
|
for (Integer well: noise) {
|
||||||
|
sequences.get(k).deleteWell(well);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return count;
|
||||||
|
}
|
||||||
|
|
||||||
private static Map<String, String> makeSequenceToSequenceMap(List<String[]> cells, int keySequenceIndex,
|
private static Map<String, String> makeSequenceToSequenceMap(List<String[]> cells, int keySequenceIndex,
|
||||||
int valueSequenceIndex){
|
int valueSequenceIndex){
|
||||||
Map<String, String> keySequenceToValueSequenceMap = new HashMap<>();
|
Map<String, String> keySequenceToValueSequenceMap = new HashMap<>();
|
||||||
|
|||||||
@@ -32,6 +32,8 @@ public class Vertex implements Serializable, Comparable<Vertex> {
|
|||||||
|
|
||||||
public Integer getReadCount() { return record.getReadCount(); }
|
public Integer getReadCount() { return record.getReadCount(); }
|
||||||
|
|
||||||
|
public Integer getReadCount(Integer well) { return record.getReadCount(well); }
|
||||||
|
|
||||||
public Map<Integer, Integer> getWellOccupancies() { return record.getWellOccupancies(); }
|
public Map<Integer, Integer> getWellOccupancies() { return record.getWellOccupancies(); }
|
||||||
|
|
||||||
@Override //adapted from JGraphT example code
|
@Override //adapted from JGraphT example code
|
||||||
@@ -72,4 +74,12 @@ public class Vertex implements Serializable, Comparable<Vertex> {
|
|||||||
public int compareTo(Vertex other) {
|
public int compareTo(Vertex other) {
|
||||||
return this.vertexLabel - other.getVertexLabel();
|
return this.vertexLabel - other.getVertexLabel();
|
||||||
}
|
}
|
||||||
|
|
||||||
|
public Double getPotential() {
|
||||||
|
return potential;
|
||||||
|
}
|
||||||
|
|
||||||
|
public void setPotential(Double potential) {
|
||||||
|
this.potential = potential;
|
||||||
|
}
|
||||||
}
|
}
|
||||||
|
|||||||
Reference in New Issue
Block a user