This view is limited to 50 files because it contains too many changes. See the raw diff here.
Files changed (50)
  1. .gitattributes +117 -0
  2. README.md +190 -1
  3. chunked/d4-dock_000.csv.gz +3 -0
  4. chunked/d4-dock_000.log +22 -0
  5. chunked/d4-dock_001.csv.gz +3 -0
  6. chunked/d4-dock_001.log +22 -0
  7. chunked/d4-dock_002.csv.gz +3 -0
  8. chunked/d4-dock_002.log +22 -0
  9. chunked/d4-dock_003.csv.gz +3 -0
  10. chunked/d4-dock_003.log +22 -0
  11. chunked/d4-dock_004.csv.gz +3 -0
  12. chunked/d4-dock_004.log +22 -0
  13. chunked/d4-dock_005.csv.gz +3 -0
  14. chunked/d4-dock_005.log +22 -0
  15. chunked/d4-dock_006.csv.gz +3 -0
  16. chunked/d4-dock_006.log +22 -0
  17. chunked/d4-dock_007.csv.gz +3 -0
  18. chunked/d4-dock_007.log +22 -0
  19. chunked/d4-dock_008.csv.gz +3 -0
  20. chunked/d4-dock_008.log +22 -0
  21. chunked/d4-dock_009.csv.gz +3 -0
  22. chunked/d4-dock_009.log +22 -0
  23. chunked/d4-dock_010.csv.gz +3 -0
  24. chunked/d4-dock_010.log +22 -0
  25. chunked/d4-dock_011.csv.gz +3 -0
  26. chunked/d4-dock_011.log +22 -0
  27. chunked/d4-dock_012.csv.gz +3 -0
  28. chunked/d4-dock_012.log +22 -0
  29. chunked/d4-dock_013.csv.gz +3 -0
  30. chunked/d4-dock_013.log +22 -0
  31. chunked/d4-dock_014.csv.gz +3 -0
  32. chunked/d4-dock_014.log +22 -0
  33. chunked/d4-dock_015.csv.gz +3 -0
  34. chunked/d4-dock_015.log +22 -0
  35. chunked/d4-dock_016.csv.gz +3 -0
  36. chunked/d4-dock_016.log +22 -0
  37. chunked/d4-dock_017.csv.gz +3 -0
  38. chunked/d4-dock_017.log +22 -0
  39. chunked/d4-dock_018.csv.gz +3 -0
  40. chunked/d4-dock_018.log +22 -0
  41. chunked/d4-dock_019.csv.gz +3 -0
  42. chunked/d4-dock_019.log +22 -0
  43. chunked/d4-dock_020.csv.gz +3 -0
  44. chunked/d4-dock_020.log +22 -0
  45. chunked/d4-dock_021.csv.gz +3 -0
  46. chunked/d4-dock_021.log +22 -0
  47. chunked/d4-dock_022.csv.gz +3 -0
  48. chunked/d4-dock_022.log +22 -0
  49. chunked/d4-dock_023.csv.gz +3 -0
  50. chunked/d4-dock_023.log +22 -0
.gitattributes CHANGED
@@ -56,3 +56,120 @@ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
  # Video files - compressed
  *.mp4 filter=lfs diff=lfs merge=lfs -text
  *.webm filter=lfs diff=lfs merge=lfs -text
+ chunked/d4-dock_000.csv.gz filter=lfs diff=lfs merge=lfs -text
+ chunked/d4-dock_018.csv.gz filter=lfs diff=lfs merge=lfs -text
+ chunked/d4-dock_033.csv.gz filter=lfs diff=lfs merge=lfs -text
+ chunked/d4-dock_042.csv.gz filter=lfs diff=lfs merge=lfs -text
+ chunked/d4-dock_047.csv.gz filter=lfs diff=lfs merge=lfs -text
+ chunked/d4-dock_050.csv.gz filter=lfs diff=lfs merge=lfs -text
+ chunked/d4-dock_105.csv.gz filter=lfs diff=lfs merge=lfs -text
+ chunked/d4-dock_031.csv.gz filter=lfs diff=lfs merge=lfs -text
+ chunked/d4-dock_113.csv.gz filter=lfs diff=lfs merge=lfs -text
+ chunked/d4-dock_012.csv.gz filter=lfs diff=lfs merge=lfs -text
+ chunked/d4-dock_022.csv.gz filter=lfs diff=lfs merge=lfs -text
+ chunked/d4-dock_038.csv.gz filter=lfs diff=lfs merge=lfs -text
+ chunked/d4-dock_062.csv.gz filter=lfs diff=lfs merge=lfs -text
+ chunked/d4-dock_072.csv.gz filter=lfs diff=lfs merge=lfs -text
+ chunked/d4-dock_004.csv.gz filter=lfs diff=lfs merge=lfs -text
+ chunked/d4-dock_061.csv.gz filter=lfs diff=lfs merge=lfs -text
+ chunked/d4-dock_097.csv.gz filter=lfs diff=lfs merge=lfs -text
+ chunked/d4-dock_019.csv.gz filter=lfs diff=lfs merge=lfs -text
+ chunked/d4-dock_054.csv.gz filter=lfs diff=lfs merge=lfs -text
+ chunked/d4-dock_090.csv.gz filter=lfs diff=lfs merge=lfs -text
+ chunked/d4-dock_084.csv.gz filter=lfs diff=lfs merge=lfs -text
+ chunked/d4-dock_088.csv.gz filter=lfs diff=lfs merge=lfs -text
+ chunked/d4-dock_110.csv.gz filter=lfs diff=lfs merge=lfs -text
+ chunked/d4-dock_021.csv.gz filter=lfs diff=lfs merge=lfs -text
+ chunked/d4-dock_041.csv.gz filter=lfs diff=lfs merge=lfs -text
+ chunked/d4-dock_049.csv.gz filter=lfs diff=lfs merge=lfs -text
+ chunked/d4-dock_052.csv.gz filter=lfs diff=lfs merge=lfs -text
+ chunked/d4-dock_073.csv.gz filter=lfs diff=lfs merge=lfs -text
+ chunked/d4-dock_040.csv.gz filter=lfs diff=lfs merge=lfs -text
+ chunked/d4-dock_055.csv.gz filter=lfs diff=lfs merge=lfs -text
+ chunked/d4-dock_069.csv.gz filter=lfs diff=lfs merge=lfs -text
+ chunked/d4-dock_079.csv.gz filter=lfs diff=lfs merge=lfs -text
+ chunked/d4-dock_086.csv.gz filter=lfs diff=lfs merge=lfs -text
+ chunked/d4-dock_020.csv.gz filter=lfs diff=lfs merge=lfs -text
+ chunked/d4-dock_039.csv.gz filter=lfs diff=lfs merge=lfs -text
+ chunked/d4-dock_101.csv.gz filter=lfs diff=lfs merge=lfs -text
+ chunked/d4-dock_111.csv.gz filter=lfs diff=lfs merge=lfs -text
+ chunked/d4-dock_015.csv.gz filter=lfs diff=lfs merge=lfs -text
+ chunked/d4-dock_064.csv.gz filter=lfs diff=lfs merge=lfs -text
+ chunked/d4-dock_104.csv.gz filter=lfs diff=lfs merge=lfs -text
+ chunked/d4-dock_059.csv.gz filter=lfs diff=lfs merge=lfs -text
+ chunked/d4-dock_063.csv.gz filter=lfs diff=lfs merge=lfs -text
+ chunked/d4-dock_066.csv.gz filter=lfs diff=lfs merge=lfs -text
+ chunked/d4-dock_112.csv.gz filter=lfs diff=lfs merge=lfs -text
+ chunked/d4-dock_115.csv.gz filter=lfs diff=lfs merge=lfs -text
+ chunked/d4-dock_116.csv.gz filter=lfs diff=lfs merge=lfs -text
+ chunked/d4-dock_003.csv.gz filter=lfs diff=lfs merge=lfs -text
+ chunked/d4-dock_006.csv.gz filter=lfs diff=lfs merge=lfs -text
+ chunked/d4-dock_028.csv.gz filter=lfs diff=lfs merge=lfs -text
+ chunked/d4-dock_034.csv.gz filter=lfs diff=lfs merge=lfs -text
+ chunked/d4-dock_078.csv.gz filter=lfs diff=lfs merge=lfs -text
+ chunked/d4-dock_081.csv.gz filter=lfs diff=lfs merge=lfs -text
+ chunked/d4-dock_093.csv.gz filter=lfs diff=lfs merge=lfs -text
+ chunked/d4-dock_109.csv.gz filter=lfs diff=lfs merge=lfs -text
+ chunked/d4-dock_008.csv.gz filter=lfs diff=lfs merge=lfs -text
+ chunked/d4-dock_091.csv.gz filter=lfs diff=lfs merge=lfs -text
+ chunked/d4-dock_096.csv.gz filter=lfs diff=lfs merge=lfs -text
+ chunked/d4-dock_107.csv.gz filter=lfs diff=lfs merge=lfs -text
+ chunked/d4-dock_002.csv.gz filter=lfs diff=lfs merge=lfs -text
+ chunked/d4-dock_009.csv.gz filter=lfs diff=lfs merge=lfs -text
+ chunked/d4-dock_029.csv.gz filter=lfs diff=lfs merge=lfs -text
+ chunked/d4-dock_060.csv.gz filter=lfs diff=lfs merge=lfs -text
+ chunked/d4-dock_089.csv.gz filter=lfs diff=lfs merge=lfs -text
+ chunked/d4-dock_114.csv.gz filter=lfs diff=lfs merge=lfs -text
+ chunked/d4-dock_014.csv.gz filter=lfs diff=lfs merge=lfs -text
+ chunked/d4-dock_025.csv.gz filter=lfs diff=lfs merge=lfs -text
+ chunked/d4-dock_045.csv.gz filter=lfs diff=lfs merge=lfs -text
+ chunked/d4-dock_070.csv.gz filter=lfs diff=lfs merge=lfs -text
+ chunked/d4-dock_099.csv.gz filter=lfs diff=lfs merge=lfs -text
+ chunked/d4-dock_103.csv.gz filter=lfs diff=lfs merge=lfs -text
+ chunked/d4-dock_082.csv.gz filter=lfs diff=lfs merge=lfs -text
+ chunked/d4-dock_083.csv.gz filter=lfs diff=lfs merge=lfs -text
+ chunked/d4-dock_102.csv.gz filter=lfs diff=lfs merge=lfs -text
+ chunked/d4-dock_007.csv.gz filter=lfs diff=lfs merge=lfs -text
+ chunked/d4-dock_016.csv.gz filter=lfs diff=lfs merge=lfs -text
+ chunked/d4-dock_046.csv.gz filter=lfs diff=lfs merge=lfs -text
+ chunked/d4-dock_051.csv.gz filter=lfs diff=lfs merge=lfs -text
+ chunked/d4-dock_057.csv.gz filter=lfs diff=lfs merge=lfs -text
+ chunked/d4-dock_005.csv.gz filter=lfs diff=lfs merge=lfs -text
+ chunked/d4-dock_043.csv.gz filter=lfs diff=lfs merge=lfs -text
+ chunked/d4-dock_053.csv.gz filter=lfs diff=lfs merge=lfs -text
+ chunked/d4-dock_056.csv.gz filter=lfs diff=lfs merge=lfs -text
+ chunked/d4-dock_085.csv.gz filter=lfs diff=lfs merge=lfs -text
+ chunked/d4-dock_030.csv.gz filter=lfs diff=lfs merge=lfs -text
+ chunked/d4-dock_036.csv.gz filter=lfs diff=lfs merge=lfs -text
+ chunked/d4-dock_067.csv.gz filter=lfs diff=lfs merge=lfs -text
+ chunked/d4-dock_092.csv.gz filter=lfs diff=lfs merge=lfs -text
+ chunked/d4-dock_010.csv.gz filter=lfs diff=lfs merge=lfs -text
+ chunked/d4-dock_035.csv.gz filter=lfs diff=lfs merge=lfs -text
+ chunked/d4-dock_075.csv.gz filter=lfs diff=lfs merge=lfs -text
+ chunked/d4-dock_094.csv.gz filter=lfs diff=lfs merge=lfs -text
+ chunked/d4-dock_013.csv.gz filter=lfs diff=lfs merge=lfs -text
+ chunked/d4-dock_017.csv.gz filter=lfs diff=lfs merge=lfs -text
+ chunked/d4-dock_023.csv.gz filter=lfs diff=lfs merge=lfs -text
+ chunked/d4-dock_026.csv.gz filter=lfs diff=lfs merge=lfs -text
+ chunked/d4-dock_065.csv.gz filter=lfs diff=lfs merge=lfs -text
+ chunked/d4-dock_048.csv.gz filter=lfs diff=lfs merge=lfs -text
+ chunked/d4-dock_068.csv.gz filter=lfs diff=lfs merge=lfs -text
+ chunked/d4-dock_100.csv.gz filter=lfs diff=lfs merge=lfs -text
+ chunked/d4-dock_106.csv.gz filter=lfs diff=lfs merge=lfs -text
+ chunked/d4-dock_024.csv.gz filter=lfs diff=lfs merge=lfs -text
+ chunked/d4-dock_044.csv.gz filter=lfs diff=lfs merge=lfs -text
+ chunked/d4-dock_074.csv.gz filter=lfs diff=lfs merge=lfs -text
+ chunked/d4-dock_076.csv.gz filter=lfs diff=lfs merge=lfs -text
+ chunked/d4-dock_098.csv.gz filter=lfs diff=lfs merge=lfs -text
+ chunked/d4-dock_011.csv.gz filter=lfs diff=lfs merge=lfs -text
+ chunked/d4-dock_108.csv.gz filter=lfs diff=lfs merge=lfs -text
+ chunked/d4-dock_032.csv.gz filter=lfs diff=lfs merge=lfs -text
+ chunked/d4-dock_058.csv.gz filter=lfs diff=lfs merge=lfs -text
+ chunked/d4-dock_077.csv.gz filter=lfs diff=lfs merge=lfs -text
+ chunked/d4-dock_087.csv.gz filter=lfs diff=lfs merge=lfs -text
+ chunked/d4-dock_001.csv.gz filter=lfs diff=lfs merge=lfs -text
+ chunked/d4-dock_027.csv.gz filter=lfs diff=lfs merge=lfs -text
+ chunked/d4-dock_071.csv.gz filter=lfs diff=lfs merge=lfs -text
+ chunked/d4-dock_037.csv.gz filter=lfs diff=lfs merge=lfs -text
+ chunked/d4-dock_080.csv.gz filter=lfs diff=lfs merge=lfs -text
+ chunked/d4-dock_095.csv.gz filter=lfs diff=lfs merge=lfs -text
README.md CHANGED
@@ -1,3 +1,192 @@
  ---
- license: mit
+ license: cc-by-4.0
+ task_categories:
+ - text-classification
+ - zero-shot-classification
+ - text2text-generation
+ - translation
+ tags:
+ - chemistry
+ - SMILES
+ - docking
+ pretty_name: 'Ultra-large docking: D4 receptor 115M (Lyu J, Wang S, Balius T, Singh I, Nature 2019)'
+ size_categories:
+ - '100M<n<1B'
+ configs:
+ - config_name: main_data
+   data_files: "chunked/d4-dock_*.csv.gz"
+   sep: ","
+   default: true
  ---
+ # Ultra-large docking data: D4 receptor 115M compounds
+
+ These data are from the labs of John J. Irwin, Bryan L. Roth, and Brian K. Shoichet, who published them as:
+
+ > [!NOTE]
+ > Lyu J, Wang S, Balius TE, Singh I, Levit A, Moroz YS, O'Meara MJ, Che T, Algaa E, Tolmachova K, Tolmachev AA, Shoichet BK, Roth BL, Irwin JJ.
+ > Ultra-large library docking for discovering new chemotypes. Nature. 2019 Feb;566(7743):224-229. doi: [10.1038/s41586-019-0917-9](https://doi.org/10.1038/s41586-019-0917-9).
+ > Epub 2019 Feb 6. PMID: [30728502](https://pubmed.ncbi.nlm.nih.gov/30728502/); PMCID: [PMC6383769](https://www.ncbi.nlm.nih.gov/pmc/articles/PMC6383769/).
+ >
+
+ ## Dataset Details
+
+ The compounds are represented as SMILES strings, and are annotated with ZINC IDs, heavy atom count (HAC), and DOCKscore. For convenience we have added molecular weight,
+ Crippen cLogP, and topological polar surface area (TPSA) as calculated by RDKit (using [schemist](https://github.com/scbirlab/schemist)).
+
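+ Each shard under `chunked/` is an ordinary gzipped CSV, so a single file can be read directly, or the whole glob can be streamed. A minimal sketch is below; the exact column names (for example a `dockscore` column) are assumptions here and should be checked against a shard's header:
+
+ ```python
+ import pandas as pd
+ from datasets import load_dataset
+
+ # Read one shard with pandas (gzip compression is inferred from the suffix).
+ shard = pd.read_csv("chunked/d4-dock_000.csv.gz")
+ print(shard.columns.tolist())  # inspect the real column names first
+
+ # Or stream every shard through the 🤗 datasets CSV loader without
+ # materialising all ~115M rows in memory.
+ ds = load_dataset(
+     "csv",
+     data_files="chunked/d4-dock_*.csv.gz",
+     split="train",
+     streaming=True,
+ )
+ print(next(iter(ds)))
+ ```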
+
+ ### Dataset Description
+
+ <!-- Provide a longer summary of what this dataset is. -->
+
+ The authors of doi: [10.1038/s41586-019-0917-9](https://doi.org/10.1038/s41586-019-0917-9) carried out a massive docking campaign to
+ see if increasing the number of compounds in virtual libraries would increase the number of docking hits that represent new active
+ chemical scaffolds that validate in the wet lab.
+
+ They docked libraries of ~100 million molecules to AmpC, a $\beta$-lactamase, and the D$_4$ dopamine receptor. **This dataset contains the
+ compounds and DOCKscores for D$_4$**. We removed compounds with anomalous DOCKscores, and used [schemist](https://github.com/scbirlab/schemist)
+ to add molecular weight, Crippen cLogP, and topological polar surface area.
+
+ <!-- - - **Curated by:** @eachanjohnson -->
+ <!-- - - **Funded by [optional]:** [The Francis Crick Institute] -->
+ <!-- - - **Shared by [optional]:** [More Information Needed] -->
+ - **License:** [cc-by-4.0](https://creativecommons.org/licenses/by/4.0/)
+
+ ### Dataset Sources
+
+ <!-- Provide the basic links for the dataset. -->
+
+ - **Repository:** FigShare doi: [10.6084/m9.figshare.7359401.v3](https://doi.org/10.6084/m9.figshare.7359401.v3)
+ - **Paper:** doi: [10.1038/s41586-019-0917-9](https://doi.org/10.1038/s41586-019-0917-9)
+ <!-- - **Demo [optional]:** [More Information Needed] -->
+
+ <!-- ## Uses
+
+ <!-- Address questions around how the dataset is intended to be used. -->
+
+ ### Direct Use
+
+ <!-- This section describes suitable use cases for the dataset. -->
+
+ - Chemical property prediction (see the sketch below)
+
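+ As one illustration of that use, the sketch below fits a simple regressor to predict DOCKscore from the convenience descriptors shipped with each shard. The feature column names (`mwt`, `clogp`, `tpsa`) follow the conversion logs in this repository; the `dockscore` target name is an assumption and should be verified against a shard's header before running:
+
+ ```python
+ import pandas as pd
+ from sklearn.ensemble import HistGradientBoostingRegressor
+ from sklearn.model_selection import train_test_split
+
+ FEATURES = ["mwt", "clogp", "tpsa"]  # from the conversion logs
+ TARGET = "dockscore"                 # assumed column name; check the file
+
+ # A single shard (roughly a million rows) is plenty for a quick baseline.
+ df = pd.read_csv("chunked/d4-dock_000.csv.gz").dropna(subset=FEATURES + [TARGET])
+ X_train, X_test, y_train, y_test = train_test_split(
+     df[FEATURES], df[TARGET], test_size=0.2, random_state=0
+ )
+
+ model = HistGradientBoostingRegressor()  # fast, reasonable tabular baseline
+ model.fit(X_train, y_train)
+ print("held-out R^2:", model.score(X_test, y_test))
+ ```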
+ <!-- ### Out-of-Scope Use -->
+
+ <!-- This section addresses misuse, malicious use, and uses that the dataset will not work well for. -->
+
+ <!-- [More Information Needed] -->
+
+ <!-- ## Dataset Structure -->
+
+ <!-- This section provides a description of the dataset fields, and additional information about the dataset structure such as criteria used to create the splits, relationships between data points, etc. -->
+
+ <!-- [More Information Needed] -->
+
+ <!-- ## Dataset Creation
+
+ <!-- ### Curation Rationale
+
+ <!-- Motivation for the creation of this dataset. -->
+
+ <!-- [More Information Needed] -->
+
+ ### Source Data
+
+ Lyu J, Wang S, Balius TE, Singh I, Levit A, Moroz YS, O'Meara MJ, Che T, Algaa E, Tolmachova K, Tolmachev AA, Shoichet BK, Roth BL, Irwin JJ.
+ Ultra-large library docking for discovering new chemotypes. Nature. 2019 Feb;566(7743):224-229. doi: [10.1038/s41586-019-0917-9](https://doi.org/10.1038/s41586-019-0917-9).
+ Epub 2019 Feb 6. PMID: [30728502](https://pubmed.ncbi.nlm.nih.gov/30728502/); PMCID: [PMC6383769](https://www.ncbi.nlm.nih.gov/pmc/articles/PMC6383769/).
+
+ <!-- #### Data Collection and Processing
+
+ <!-- This section describes the data collection and processing process such as data selection criteria, filtering and normalization methods, tools and libraries used, etc. -->
+
+ <!-- [More Information Needed] -->
+
+ #### Who are the source data producers?
+
+ Jiankun Lyu†, Sheng Wang†, Trent E. Balius†, Isha Singh†, Anat Levit, Yurii S. Moroz, Matthew J. O’Meara, Tao Che, Enkhjargal Algaa, Kateryna Tolmachova,
+ Andrey A. Tolmachev, Brian K. Shoichet*, Bryan L. Roth*, and John J. Irwin*
+
+ †These authors contributed equally.
+ *Corresponding authors.
+
+ ### Annotations
+
+ We used [schemist](https://github.com/scbirlab/schemist) (which in turn uses RDKit)
+ to add molecular weight, Crippen cLogP, and topological polar surface area.
+
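+ These three descriptors can be reproduced for any SMILES string with RDKit alone; the following is a minimal sketch of the equivalent calculation, not the schemist implementation itself:
+
+ ```python
+ from rdkit import Chem
+ from rdkit.Chem import Crippen, Descriptors, rdMolDescriptors
+
+ def annotate(smiles: str) -> dict:
+     """Molecular weight, Crippen cLogP and TPSA for a single SMILES."""
+     mol = Chem.MolFromSmiles(smiles)
+     if mol is None:  # unparseable SMILES
+         return {"mwt": None, "clogp": None, "tpsa": None}
+     return {
+         "mwt": Descriptors.MolWt(mol),
+         "clogp": Crippen.MolLogP(mol),
+         "tpsa": rdMolDescriptors.CalcTPSA(mol),
+     }
+
+ print(annotate("CCO"))  # quick sanity check on ethanol
+ ```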
+ <!-- #### Annotation process -->
+
+ <!-- This section describes the annotation process such as annotation tools used in the process, the amount of data annotated, annotation guidelines provided to the annotators, interannotator statistics, annotation validation, etc. -->
+
+ <!-- #### Who are the annotators? -->
+
+ <!-- This section describes the people or systems who created the annotations. -->
+
+ <!-- [More Information Needed] -->
+
+ <!-- #### Personal and Sensitive Information -->
+
+ <!-- State whether the dataset contains data that might be considered personal, sensitive, or private (e.g., data that reveals addresses, uniquely identifiable names or aliases, racial or ethnic origins, sexual orientations, religious beliefs, political opinions, financial or health data, etc.). If efforts were made to anonymize the data, describe the anonymization process. -->
+
+ <!-- [More Information Needed] -->
+
+ <!-- ## Bias, Risks, and Limitations -->
+
+ <!-- This section is meant to convey both technical and sociotechnical limitations. -->
+
+ <!-- [More Information Needed] -->
+
+ <!-- ### Recommendations -->
+
+ <!-- This section is meant to convey recommendations with respect to the bias, risk, and technical limitations. -->
+
+ Users should be made aware of the risks, biases and limitations of the dataset. More information needed for further recommendations.
+
+ ## Citation
+
+ <!-- If there is a paper or blog post introducing the dataset, the APA and Bibtex information for that should go in this section. -->
+
+ **BibTeX:**
+
+ ```
+ @article{10.1038/s41586-019-0917-9,
+ year = {2019},
+ title = {{Ultra-large library docking for discovering new chemotypes}},
+ author = {Lyu, Jiankun and Wang, Sheng and Balius, Trent E. and Singh, Isha and Levit, Anat and Moroz, Yurii S. and O’Meara, Matthew J. and Che, Tao and Algaa, Enkhjargal and Tolmachova, Kateryna and Tolmachev, Andrey A. and Shoichet, Brian K. and Roth, Bryan L. and Irwin, John J.},
+ journal = {Nature},
+ issn = {0028-0836},
+ doi = {10.1038/s41586-019-0917-9},
+ pmid = {30728502},
+ pmcid = {PMC6383769},
+ url = {https://www.ncbi.nlm.nih.gov/pubmed/30728502},
+ abstract = {{Despite intense interest in expanding chemical space, libraries containing hundreds-of-millions to billions of diverse molecules have remained inaccessible. Here we investigate structure-based docking of 170 million make-on-demand compounds from 130 well-characterized reactions. The resulting library is diverse, representing over 10.7 million scaffolds that are otherwise unavailable. For each compound in the library, docking against AmpC β-lactamase (AmpC) and the D4 dopamine receptor were simulated. From the top-ranking molecules, 44 and 549 compounds were synthesized and tested for interactions with AmpC and the D4 dopamine receptor, respectively. We found a phenolate inhibitor of AmpC, which revealed a group of inhibitors without known precedent. This molecule was optimized to 77 nM, which places it among the most potent non-covalent AmpC inhibitors known. Crystal structures of this and other AmpC inhibitors confirmed the docking predictions. Against the D4 dopamine receptor, hit rates fell almost monotonically with docking score, and a hit-rate versus score curve predicted that the library contained 453,000 ligands for the D4 dopamine receptor. Of 81 new chemotypes discovered, 30 showed submicromolar activity, including a 180-pM subtype-selective agonist of the D4 dopamine receptor. Using a make-on-demand library that contains hundreds-of-millions of molecules, structure-based docking was used to identify compounds that, after synthesis and testing, are shown to interact with AmpC β-lactamase and the D4 dopamine receptor with high affinity.}},
+ pages = {224--229},
+ number = {7743},
+ volume = {566},
+ keywords = {}
+ }
+ ```
+
+ **APA:**
+
+ Lyu, J., Wang, S., Balius, T. E., Singh, I., Levit, A., Moroz, Y. S., O'Meara, M. J., Che, T., Algaa, E., Tolmachova, K., Tolmachev, A. A., Shoichet, B. K.,
+ Roth, B. L., & Irwin, J. J. (2019). Ultra-large library docking for discovering new chemotypes. Nature, 566(7743), 224–229.
+ https://doi.org/10.1038/s41586-019-0917-9
+
+ <!-- ## Glossary [optional]
+
+ <!-- If relevant, include terms and calculations in this section that can help readers understand the dataset or dataset card. -->
+
+ <!-- [More Information Needed]
+
+ <!-- ## More Information [optional]
+
+ [More Information Needed] -->
+
+ ## Dataset Card Authors [optional]
+
+ @eachanjohnson
+
+ <!-- ## Dataset Card Contact
+
+ [More Information Needed] -->
chunked/d4-dock_000.csv.gz ADDED
@@ -0,0 +1,3 @@
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:1fbf7eece9634157d8c26fcf503749fc270219d7e3b41e816998d5eba3aba525
3
+ size 36883772
chunked/d4-dock_000.log ADDED
@@ -0,0 +1,22 @@
1
+ Failed to find the pandas get_adjustment() function to patch
2
+ Failed to patch pandas - PandasTools will have limited functionality
3
+ 🚀 Converting between string representations with the following parameters:
4
+ subcommand: convert
5
+ output: <_io.TextIOWrapper name='<stdout>' mode='w' encoding='utf-8'>
6
+ format: csv
7
+ input: <_io.TextIOWrapper name='<stdin>' mode='r' encoding='utf-8'>
8
+ representation: SMILES
9
+ column: smiles
10
+ prefix: None
11
+ to: ['id', 'smiles', 'scaffold', 'mwt', 'clogp', 'tpsa']
12
+ options: ['prefix=SCB-']
13
+ func: <function _convert at 0x7f5480e92a20>
14
+
15
+ Error counts:
16
+ id: 1
17
+ smiles: 1
18
+ scaffold: 1
19
+ mwt: 1
20
+ clogp: 1
21
+ tpsa: 1
22
+ ⏰ Completed process in 0:22:30.151730
chunked/d4-dock_001.csv.gz ADDED
@@ -0,0 +1,3 @@
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:fab050670785795466d8ed548e379366a477dab3ee415efd9e08c8b38fa24e23
3
+ size 33259263
chunked/d4-dock_001.log ADDED
@@ -0,0 +1,22 @@
1
+ Failed to find the pandas get_adjustment() function to patch
2
+ Failed to patch pandas - PandasTools will have limited functionality
3
+ 🚀 Converting between string representations with the following parameters:
4
+ subcommand: convert
5
+ output: <_io.TextIOWrapper name='<stdout>' mode='w' encoding='utf-8'>
6
+ format: csv
7
+ input: <_io.TextIOWrapper name='<stdin>' mode='r' encoding='utf-8'>
8
+ representation: SMILES
9
+ column: smiles
10
+ prefix: None
11
+ to: ['id', 'smiles', 'scaffold', 'mwt', 'clogp', 'tpsa']
12
+ options: ['prefix=SCB-']
13
+ func: <function _convert at 0x7f62d4f42a20>
14
+
15
+ Error counts:
16
+ id: 0
17
+ smiles: 0
18
+ scaffold: 0
19
+ mwt: 0
20
+ clogp: 0
21
+ tpsa: 0
22
+ ⏰ Completed process in 0:21:46.451323
chunked/d4-dock_002.csv.gz ADDED
@@ -0,0 +1,3 @@
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:663ac4ef0791b88ad1002f8deec97872b2e979f59e5487c4d4ee083ad9ff1a74
3
+ size 31300140
chunked/d4-dock_002.log ADDED
@@ -0,0 +1,22 @@
1
+ Failed to find the pandas get_adjustment() function to patch
2
+ Failed to patch pandas - PandasTools will have limited functionality
3
+ 🚀 Converting between string representations with the following parameters:
4
+ subcommand: convert
5
+ output: <_io.TextIOWrapper name='<stdout>' mode='w' encoding='utf-8'>
6
+ format: csv
7
+ input: <_io.TextIOWrapper name='<stdin>' mode='r' encoding='utf-8'>
8
+ representation: SMILES
9
+ column: smiles
10
+ prefix: None
11
+ to: ['id', 'smiles', 'scaffold', 'mwt', 'clogp', 'tpsa']
12
+ options: ['prefix=SCB-']
13
+ func: <function _convert at 0x7fb87c82aa20>
14
+
15
+ Error counts:
16
+ id: 0
17
+ smiles: 0
18
+ scaffold: 0
19
+ mwt: 0
20
+ clogp: 0
21
+ tpsa: 0
22
+ ⏰ Completed process in 0:21:27.089338
chunked/d4-dock_003.csv.gz ADDED
@@ -0,0 +1,3 @@
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:1deec28e48b6f0ce2b2b9a0f6ed6e886d59a47bab8090504120aa21f8b7bb9d7
3
+ size 32727854
chunked/d4-dock_003.log ADDED
@@ -0,0 +1,22 @@
1
+ Failed to find the pandas get_adjustment() function to patch
2
+ Failed to patch pandas - PandasTools will have limited functionality
3
+ 🚀 Converting between string representations with the following parameters:
4
+ subcommand: convert
5
+ output: <_io.TextIOWrapper name='<stdout>' mode='w' encoding='utf-8'>
6
+ format: csv
7
+ input: <_io.TextIOWrapper name='<stdin>' mode='r' encoding='utf-8'>
8
+ representation: SMILES
9
+ column: smiles
10
+ prefix: None
11
+ to: ['id', 'smiles', 'scaffold', 'mwt', 'clogp', 'tpsa']
12
+ options: ['prefix=SCB-']
13
+ func: <function _convert at 0x7f0362fe2a20>
14
+
15
+ Error counts:
16
+ id: 0
17
+ smiles: 0
18
+ scaffold: 0
19
+ mwt: 0
20
+ clogp: 0
21
+ tpsa: 0
22
+ ⏰ Completed process in 0:22:26.164630
chunked/d4-dock_004.csv.gz ADDED
@@ -0,0 +1,3 @@
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:58292b20ef6d44ce27aee92a80e2384a775611b9dbf7c8a53566dc561c489666
3
+ size 33145400
chunked/d4-dock_004.log ADDED
@@ -0,0 +1,22 @@
1
+ Failed to find the pandas get_adjustment() function to patch
2
+ Failed to patch pandas - PandasTools will have limited functionality
3
+ 🚀 Converting between string representations with the following parameters:
4
+ subcommand: convert
5
+ output: <_io.TextIOWrapper name='<stdout>' mode='w' encoding='utf-8'>
6
+ format: csv
7
+ input: <_io.TextIOWrapper name='<stdin>' mode='r' encoding='utf-8'>
8
+ representation: SMILES
9
+ column: smiles
10
+ prefix: None
11
+ to: ['id', 'smiles', 'scaffold', 'mwt', 'clogp', 'tpsa']
12
+ options: ['prefix=SCB-']
13
+ func: <function _convert at 0x7f6fce37ea20>
14
+
15
+ Error counts:
16
+ id: 0
17
+ smiles: 0
18
+ scaffold: 0
19
+ mwt: 0
20
+ clogp: 0
21
+ tpsa: 0
22
+ ⏰ Completed process in 0:22:30.271494
chunked/d4-dock_005.csv.gz ADDED
@@ -0,0 +1,3 @@
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:14d2024711a4b7e4f92c08a15f9e86bd071f44cdef5d967b919f46084bf2b99e
3
+ size 31891519
chunked/d4-dock_005.log ADDED
@@ -0,0 +1,22 @@
1
+ Failed to find the pandas get_adjustment() function to patch
2
+ Failed to patch pandas - PandasTools will have limited functionality
3
+ 🚀 Converting between string representations with the following parameters:
4
+ subcommand: convert
5
+ output: <_io.TextIOWrapper name='<stdout>' mode='w' encoding='utf-8'>
6
+ format: csv
7
+ input: <_io.TextIOWrapper name='<stdin>' mode='r' encoding='utf-8'>
8
+ representation: SMILES
9
+ column: smiles
10
+ prefix: None
11
+ to: ['id', 'smiles', 'scaffold', 'mwt', 'clogp', 'tpsa']
12
+ options: ['prefix=SCB-']
13
+ func: <function _convert at 0x7f629decea20>
14
+
15
+ Error counts:
16
+ id: 0
17
+ smiles: 0
18
+ scaffold: 0
19
+ mwt: 0
20
+ clogp: 0
21
+ tpsa: 0
22
+ ⏰ Completed process in 0:22:28.916154
chunked/d4-dock_006.csv.gz ADDED
@@ -0,0 +1,3 @@
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:936bc25e6f0ff8c2756dd1dac712a4bb7efd70f891cde846cb3e591c2a224b20
3
+ size 31855147
chunked/d4-dock_006.log ADDED
@@ -0,0 +1,22 @@
1
+ Failed to find the pandas get_adjustment() function to patch
2
+ Failed to patch pandas - PandasTools will have limited functionality
3
+ 🚀 Converting between string representations with the following parameters:
4
+ subcommand: convert
5
+ output: <_io.TextIOWrapper name='<stdout>' mode='w' encoding='utf-8'>
6
+ format: csv
7
+ input: <_io.TextIOWrapper name='<stdin>' mode='r' encoding='utf-8'>
8
+ representation: SMILES
9
+ column: smiles
10
+ prefix: None
11
+ to: ['id', 'smiles', 'scaffold', 'mwt', 'clogp', 'tpsa']
12
+ options: ['prefix=SCB-']
13
+ func: <function _convert at 0x7fb177b82a20>
14
+
15
+ Error counts:
16
+ id: 0
17
+ smiles: 0
18
+ scaffold: 0
19
+ mwt: 0
20
+ clogp: 0
21
+ tpsa: 0
22
+ ⏰ Completed process in 0:21:55.902736
chunked/d4-dock_007.csv.gz ADDED
@@ -0,0 +1,3 @@
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:804c5b2b541826b4cbb4dd584eb08c04bf59cbb7f2abbc8207e71ade09868618
3
+ size 31167980
chunked/d4-dock_007.log ADDED
@@ -0,0 +1,22 @@
1
+ Failed to find the pandas get_adjustment() function to patch
2
+ Failed to patch pandas - PandasTools will have limited functionality
3
+ 🚀 Converting between string representations with the following parameters:
4
+ subcommand: convert
5
+ output: <_io.TextIOWrapper name='<stdout>' mode='w' encoding='utf-8'>
6
+ format: csv
7
+ input: <_io.TextIOWrapper name='<stdin>' mode='r' encoding='utf-8'>
8
+ representation: SMILES
9
+ column: smiles
10
+ prefix: None
11
+ to: ['id', 'smiles', 'scaffold', 'mwt', 'clogp', 'tpsa']
12
+ options: ['prefix=SCB-']
13
+ func: <function _convert at 0x7f810868aa20>
14
+
15
+ Error counts:
16
+ id: 0
17
+ smiles: 0
18
+ scaffold: 0
19
+ mwt: 0
20
+ clogp: 0
21
+ tpsa: 0
22
+ ⏰ Completed process in 0:21:26.395170
chunked/d4-dock_008.csv.gz ADDED
@@ -0,0 +1,3 @@
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:88e2aeef148dab969863c714f7cef9ce1324b5980f06b65aef010734f2e4caed
3
+ size 32270484
chunked/d4-dock_008.log ADDED
@@ -0,0 +1,22 @@
1
+ Failed to find the pandas get_adjustment() function to patch
2
+ Failed to patch pandas - PandasTools will have limited functionality
3
+ 🚀 Converting between string representations with the following parameters:
4
+ subcommand: convert
5
+ output: <_io.TextIOWrapper name='<stdout>' mode='w' encoding='utf-8'>
6
+ format: csv
7
+ input: <_io.TextIOWrapper name='<stdin>' mode='r' encoding='utf-8'>
8
+ representation: SMILES
9
+ column: smiles
10
+ prefix: None
11
+ to: ['id', 'smiles', 'scaffold', 'mwt', 'clogp', 'tpsa']
12
+ options: ['prefix=SCB-']
13
+ func: <function _convert at 0x7fb9a384ea20>
14
+
15
+ Error counts:
16
+ id: 0
17
+ smiles: 0
18
+ scaffold: 0
19
+ mwt: 0
20
+ clogp: 0
21
+ tpsa: 0
22
+ ⏰ Completed process in 0:22:37.260826
chunked/d4-dock_009.csv.gz ADDED
@@ -0,0 +1,3 @@
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:fe0a07b43939bbabd21b151040c967e34402b33792b07f864d13ed39e0768638
3
+ size 32043424
chunked/d4-dock_009.log ADDED
@@ -0,0 +1,22 @@
1
+ Failed to find the pandas get_adjustment() function to patch
2
+ Failed to patch pandas - PandasTools will have limited functionality
3
+ 🚀 Converting between string representations with the following parameters:
4
+ subcommand: convert
5
+ output: <_io.TextIOWrapper name='<stdout>' mode='w' encoding='utf-8'>
6
+ format: csv
7
+ input: <_io.TextIOWrapper name='<stdin>' mode='r' encoding='utf-8'>
8
+ representation: SMILES
9
+ column: smiles
10
+ prefix: None
11
+ to: ['id', 'smiles', 'scaffold', 'mwt', 'clogp', 'tpsa']
12
+ options: ['prefix=SCB-']
13
+ func: <function _convert at 0x7f78c97c2a20>
14
+
15
+ Error counts:
16
+ id: 0
17
+ smiles: 0
18
+ scaffold: 0
19
+ mwt: 0
20
+ clogp: 0
21
+ tpsa: 0
22
+ ⏰ Completed process in 0:22:46.049916
chunked/d4-dock_010.csv.gz ADDED
@@ -0,0 +1,3 @@
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:c4e18b29f195255185264bd1b1c34935d3af9f5b40cba6ec3281d78bc92bed44
3
+ size 37493619
chunked/d4-dock_010.log ADDED
@@ -0,0 +1,22 @@
1
+ Failed to find the pandas get_adjustment() function to patch
2
+ Failed to patch pandas - PandasTools will have limited functionality
3
+ 🚀 Converting between string representations with the following parameters:
4
+ subcommand: convert
5
+ output: <_io.TextIOWrapper name='<stdout>' mode='w' encoding='utf-8'>
6
+ format: csv
7
+ input: <_io.TextIOWrapper name='<stdin>' mode='r' encoding='utf-8'>
8
+ representation: SMILES
9
+ column: smiles
10
+ prefix: None
11
+ to: ['id', 'smiles', 'scaffold', 'mwt', 'clogp', 'tpsa']
12
+ options: ['prefix=SCB-']
13
+ func: <function _convert at 0x7f18e09bea20>
14
+
15
+ Error counts:
16
+ id: 0
17
+ smiles: 0
18
+ scaffold: 0
19
+ mwt: 0
20
+ clogp: 0
21
+ tpsa: 0
22
+ ⏰ Completed process in 0:22:34.399038
chunked/d4-dock_011.csv.gz ADDED
@@ -0,0 +1,3 @@
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:ad7c7208153e863ed0a0660248c9754da4f534fa9381eaba41f3f14929067dcc
3
+ size 37019065
chunked/d4-dock_011.log ADDED
@@ -0,0 +1,22 @@
1
+ Failed to find the pandas get_adjustment() function to patch
2
+ Failed to patch pandas - PandasTools will have limited functionality
3
+ 🚀 Converting between string representations with the following parameters:
4
+ subcommand: convert
5
+ output: <_io.TextIOWrapper name='<stdout>' mode='w' encoding='utf-8'>
6
+ format: csv
7
+ input: <_io.TextIOWrapper name='<stdin>' mode='r' encoding='utf-8'>
8
+ representation: SMILES
9
+ column: smiles
10
+ prefix: None
11
+ to: ['id', 'smiles', 'scaffold', 'mwt', 'clogp', 'tpsa']
12
+ options: ['prefix=SCB-']
13
+ func: <function _convert at 0x7fe5ea7d2a20>
14
+
15
+ Error counts:
16
+ id: 0
17
+ smiles: 0
18
+ scaffold: 0
19
+ mwt: 0
20
+ clogp: 0
21
+ tpsa: 0
22
+ ⏰ Completed process in 0:22:37.508337
chunked/d4-dock_012.csv.gz ADDED
@@ -0,0 +1,3 @@
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:0724f18747bec61e6e324cbcd22c15703dcae8e67d32b58a27c69397efc4d985
3
+ size 35115068
chunked/d4-dock_012.log ADDED
@@ -0,0 +1,22 @@
1
+ Failed to find the pandas get_adjustment() function to patch
2
+ Failed to patch pandas - PandasTools will have limited functionality
3
+ 🚀 Converting between string representations with the following parameters:
4
+ subcommand: convert
5
+ output: <_io.TextIOWrapper name='<stdout>' mode='w' encoding='utf-8'>
6
+ format: csv
7
+ input: <_io.TextIOWrapper name='<stdin>' mode='r' encoding='utf-8'>
8
+ representation: SMILES
9
+ column: smiles
10
+ prefix: None
11
+ to: ['id', 'smiles', 'scaffold', 'mwt', 'clogp', 'tpsa']
12
+ options: ['prefix=SCB-']
13
+ func: <function _convert at 0x7f69bda9aa20>
14
+
15
+ Error counts:
16
+ id: 0
17
+ smiles: 0
18
+ scaffold: 0
19
+ mwt: 0
20
+ clogp: 0
21
+ tpsa: 0
22
+ ⏰ Completed process in 0:22:32.197440
chunked/d4-dock_013.csv.gz ADDED
@@ -0,0 +1,3 @@
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:baef4ecee711ac645d7195aa2a1d2b7a5ae197fce8a304ae629d434392c0f131
3
+ size 36727919
chunked/d4-dock_013.log ADDED
@@ -0,0 +1,22 @@
1
+ Failed to find the pandas get_adjustment() function to patch
2
+ Failed to patch pandas - PandasTools will have limited functionality
3
+ 🚀 Converting between string representations with the following parameters:
4
+ subcommand: convert
5
+ output: <_io.TextIOWrapper name='<stdout>' mode='w' encoding='utf-8'>
6
+ format: csv
7
+ input: <_io.TextIOWrapper name='<stdin>' mode='r' encoding='utf-8'>
8
+ representation: SMILES
9
+ column: smiles
10
+ prefix: None
11
+ to: ['id', 'smiles', 'scaffold', 'mwt', 'clogp', 'tpsa']
12
+ options: ['prefix=SCB-']
13
+ func: <function _convert at 0x7fde5ad8aa20>
14
+
15
+ Error counts:
16
+ id: 0
17
+ smiles: 0
18
+ scaffold: 0
19
+ mwt: 0
20
+ clogp: 0
21
+ tpsa: 0
22
+ ⏰ Completed process in 0:22:56.023123
chunked/d4-dock_014.csv.gz ADDED
@@ -0,0 +1,3 @@
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:7dfacec9693024c6e9316725f43a9dd6376da1288615f30e6c11d22bf83e8d88
3
+ size 35948155
chunked/d4-dock_014.log ADDED
@@ -0,0 +1,22 @@
1
+ Failed to find the pandas get_adjustment() function to patch
2
+ Failed to patch pandas - PandasTools will have limited functionality
3
+ 🚀 Converting between string representations with the following parameters:
4
+ subcommand: convert
5
+ output: <_io.TextIOWrapper name='<stdout>' mode='w' encoding='utf-8'>
6
+ format: csv
7
+ input: <_io.TextIOWrapper name='<stdin>' mode='r' encoding='utf-8'>
8
+ representation: SMILES
9
+ column: smiles
10
+ prefix: None
11
+ to: ['id', 'smiles', 'scaffold', 'mwt', 'clogp', 'tpsa']
12
+ options: ['prefix=SCB-']
13
+ func: <function _convert at 0x7fcf27cf6a20>
14
+
15
+ Error counts:
16
+ id: 0
17
+ smiles: 0
18
+ scaffold: 0
19
+ mwt: 0
20
+ clogp: 0
21
+ tpsa: 0
22
+ ⏰ Completed process in 0:23:14.210133
chunked/d4-dock_015.csv.gz ADDED
@@ -0,0 +1,3 @@
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:c7a7c091f92a95b1ecd08338fc06bf1d75c7f048d48b57dc5e943e366d4dd2a2
3
+ size 32997781
chunked/d4-dock_015.log ADDED
@@ -0,0 +1,22 @@
1
+ Failed to find the pandas get_adjustment() function to patch
2
+ Failed to patch pandas - PandasTools will have limited functionality
3
+ 🚀 Converting between string representations with the following parameters:
4
+ subcommand: convert
5
+ output: <_io.TextIOWrapper name='<stdout>' mode='w' encoding='utf-8'>
6
+ format: csv
7
+ input: <_io.TextIOWrapper name='<stdin>' mode='r' encoding='utf-8'>
8
+ representation: SMILES
9
+ column: smiles
10
+ prefix: None
11
+ to: ['id', 'smiles', 'scaffold', 'mwt', 'clogp', 'tpsa']
12
+ options: ['prefix=SCB-']
13
+ func: <function _convert at 0x7efe1959ea20>
14
+
15
+ Error counts:
16
+ id: 0
17
+ smiles: 0
18
+ scaffold: 0
19
+ mwt: 0
20
+ clogp: 0
21
+ tpsa: 0
22
+ ⏰ Completed process in 0:22:15.600828
chunked/d4-dock_016.csv.gz ADDED
@@ -0,0 +1,3 @@
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:028789d1ad5a2f4e9c926db67b197bc513674d5a356d1cf60e4a17b9f34c83d5
3
+ size 34047310
chunked/d4-dock_016.log ADDED
@@ -0,0 +1,22 @@
1
+ Failed to find the pandas get_adjustment() function to patch
2
+ Failed to patch pandas - PandasTools will have limited functionality
3
+ 🚀 Converting between string representations with the following parameters:
4
+ subcommand: convert
5
+ output: <_io.TextIOWrapper name='<stdout>' mode='w' encoding='utf-8'>
6
+ format: csv
7
+ input: <_io.TextIOWrapper name='<stdin>' mode='r' encoding='utf-8'>
8
+ representation: SMILES
9
+ column: smiles
10
+ prefix: None
11
+ to: ['id', 'smiles', 'scaffold', 'mwt', 'clogp', 'tpsa']
12
+ options: ['prefix=SCB-']
13
+ func: <function _convert at 0x7fcaa53b6a20>
14
+
15
+ Error counts:
16
+ id: 0
17
+ smiles: 0
18
+ scaffold: 0
19
+ mwt: 0
20
+ clogp: 0
21
+ tpsa: 0
22
+ ⏰ Completed process in 0:23:22.738810
chunked/d4-dock_017.csv.gz ADDED
@@ -0,0 +1,3 @@
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:9ecd04b1a9d416b0c1b8a9226adef096104bd1f0acd44b7168bd940fc99543f3
3
+ size 34795832
chunked/d4-dock_017.log ADDED
@@ -0,0 +1,22 @@
1
+ Failed to find the pandas get_adjustment() function to patch
2
+ Failed to patch pandas - PandasTools will have limited functionality
3
+ 🚀 Converting between string representations with the following parameters:
4
+ subcommand: convert
5
+ output: <_io.TextIOWrapper name='<stdout>' mode='w' encoding='utf-8'>
6
+ format: csv
7
+ input: <_io.TextIOWrapper name='<stdin>' mode='r' encoding='utf-8'>
8
+ representation: SMILES
9
+ column: smiles
10
+ prefix: None
11
+ to: ['id', 'smiles', 'scaffold', 'mwt', 'clogp', 'tpsa']
12
+ options: ['prefix=SCB-']
13
+ func: <function _convert at 0x7f751af2aa20>
14
+
15
+ Error counts:
16
+ id: 0
17
+ smiles: 0
18
+ scaffold: 0
19
+ mwt: 0
20
+ clogp: 0
21
+ tpsa: 0
22
+ ⏰ Completed process in 0:22:39.730332
chunked/d4-dock_018.csv.gz ADDED
@@ -0,0 +1,3 @@
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:5d1187cf0bdbfa3009b4442a7bda3298ce476b4e1a9c0e021d9631c391c293d5
3
+ size 34651382
chunked/d4-dock_018.log ADDED
@@ -0,0 +1,22 @@
1
+ Failed to find the pandas get_adjustment() function to patch
2
+ Failed to patch pandas - PandasTools will have limited functionality
3
+ 🚀 Converting between string representations with the following parameters:
4
+ subcommand: convert
5
+ output: <_io.TextIOWrapper name='<stdout>' mode='w' encoding='utf-8'>
6
+ format: csv
7
+ input: <_io.TextIOWrapper name='<stdin>' mode='r' encoding='utf-8'>
8
+ representation: SMILES
9
+ column: smiles
10
+ prefix: None
11
+ to: ['id', 'smiles', 'scaffold', 'mwt', 'clogp', 'tpsa']
12
+ options: ['prefix=SCB-']
13
+ func: <function _convert at 0x7fc1d7072a20>
14
+
15
+ Error counts:
16
+ id: 0
17
+ smiles: 0
18
+ scaffold: 0
19
+ mwt: 0
20
+ clogp: 0
21
+ tpsa: 0
22
+ ⏰ Completed process in 0:22:35.181625
chunked/d4-dock_019.csv.gz ADDED
@@ -0,0 +1,3 @@
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:68b3387eb7ff868b36bfce5d6652c072c0e1385853653f64aa69bf2285ad253a
3
+ size 34830381
chunked/d4-dock_019.log ADDED
@@ -0,0 +1,22 @@
1
+ Failed to find the pandas get_adjustment() function to patch
2
+ Failed to patch pandas - PandasTools will have limited functionality
3
+ 🚀 Converting between string representations with the following parameters:
4
+ subcommand: convert
5
+ output: <_io.TextIOWrapper name='<stdout>' mode='w' encoding='utf-8'>
6
+ format: csv
7
+ input: <_io.TextIOWrapper name='<stdin>' mode='r' encoding='utf-8'>
8
+ representation: SMILES
9
+ column: smiles
10
+ prefix: None
11
+ to: ['id', 'smiles', 'scaffold', 'mwt', 'clogp', 'tpsa']
12
+ options: ['prefix=SCB-']
13
+ func: <function _convert at 0x7f729493ea20>
14
+
15
+ Error counts:
16
+ id: 0
17
+ smiles: 0
18
+ scaffold: 0
19
+ mwt: 0
20
+ clogp: 0
21
+ tpsa: 0
22
+ ⏰ Completed process in 0:22:10.920267
chunked/d4-dock_020.csv.gz ADDED
@@ -0,0 +1,3 @@
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:f795ff1496d5188921f00092efcb0bbc261ef139a6cc3f3805d4e995cfde1e3f
3
+ size 34216923
chunked/d4-dock_020.log ADDED
@@ -0,0 +1,22 @@
1
+ Failed to find the pandas get_adjustment() function to patch
2
+ Failed to patch pandas - PandasTools will have limited functionality
3
+ 🚀 Converting between string representations with the following parameters:
4
+ subcommand: convert
5
+ output: <_io.TextIOWrapper name='<stdout>' mode='w' encoding='utf-8'>
6
+ format: csv
7
+ input: <_io.TextIOWrapper name='<stdin>' mode='r' encoding='utf-8'>
8
+ representation: SMILES
9
+ column: smiles
10
+ prefix: None
11
+ to: ['id', 'smiles', 'scaffold', 'mwt', 'clogp', 'tpsa']
12
+ options: ['prefix=SCB-']
13
+ func: <function _convert at 0x7f050c022a20>
14
+
15
+ Error counts:
16
+ id: 0
17
+ smiles: 0
18
+ scaffold: 0
19
+ mwt: 0
20
+ clogp: 0
21
+ tpsa: 0
22
+ ⏰ Completed process in 0:22:35.579393
chunked/d4-dock_021.csv.gz ADDED
@@ -0,0 +1,3 @@
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:68e56cee0f208cb332edf6106f21e4e2c4d2457b11f4e0ec4cb58768d5e028a7
3
+ size 35098331
chunked/d4-dock_021.log ADDED
@@ -0,0 +1,22 @@
1
+ Failed to find the pandas get_adjustment() function to patch
2
+ Failed to patch pandas - PandasTools will have limited functionality
3
+ 🚀 Converting between string representations with the following parameters:
4
+ subcommand: convert
5
+ output: <_io.TextIOWrapper name='<stdout>' mode='w' encoding='utf-8'>
6
+ format: csv
7
+ input: <_io.TextIOWrapper name='<stdin>' mode='r' encoding='utf-8'>
8
+ representation: SMILES
9
+ column: smiles
10
+ prefix: None
11
+ to: ['id', 'smiles', 'scaffold', 'mwt', 'clogp', 'tpsa']
12
+ options: ['prefix=SCB-']
13
+ func: <function _convert at 0x7faa37802a20>
14
+
15
+ Error counts:
16
+ id: 0
17
+ smiles: 0
18
+ scaffold: 0
19
+ mwt: 0
20
+ clogp: 0
21
+ tpsa: 0
22
+ ⏰ Completed process in 0:22:24.991804
chunked/d4-dock_022.csv.gz ADDED
@@ -0,0 +1,3 @@
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:c66bf746022b3026b02ca949b53a668cfabe06dc7e2413fcd3a6382b636671e6
3
+ size 33965782
chunked/d4-dock_022.log ADDED
@@ -0,0 +1,22 @@
1
+ Failed to find the pandas get_adjustment() function to patch
2
+ Failed to patch pandas - PandasTools will have limited functionality
3
+ 🚀 Converting between string representations with the following parameters:
4
+ subcommand: convert
5
+ output: <_io.TextIOWrapper name='<stdout>' mode='w' encoding='utf-8'>
6
+ format: csv
7
+ input: <_io.TextIOWrapper name='<stdin>' mode='r' encoding='utf-8'>
8
+ representation: SMILES
9
+ column: smiles
10
+ prefix: None
11
+ to: ['id', 'smiles', 'scaffold', 'mwt', 'clogp', 'tpsa']
12
+ options: ['prefix=SCB-']
13
+ func: <function _convert at 0x7f5308876a20>
14
+
15
+ Error counts:
16
+ id: 0
17
+ smiles: 0
18
+ scaffold: 0
19
+ mwt: 0
20
+ clogp: 0
21
+ tpsa: 0
22
+ ⏰ Completed process in 0:22:03.134320
chunked/d4-dock_023.csv.gz ADDED
@@ -0,0 +1,3 @@
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:2bd86417cfa5802832b464a4f05f8589a84970359d9043214473f4cb45d2e39c
3
+ size 35142170
chunked/d4-dock_023.log ADDED
@@ -0,0 +1,22 @@
1
+ Failed to find the pandas get_adjustment() function to patch
2
+ Failed to patch pandas - PandasTools will have limited functionality
3
+ 🚀 Converting between string representations with the following parameters:
4
+ subcommand: convert
5
+ output: <_io.TextIOWrapper name='<stdout>' mode='w' encoding='utf-8'>
6
+ format: csv
7
+ input: <_io.TextIOWrapper name='<stdin>' mode='r' encoding='utf-8'>
8
+ representation: SMILES
9
+ column: smiles
10
+ prefix: None
11
+ to: ['id', 'smiles', 'scaffold', 'mwt', 'clogp', 'tpsa']
12
+ options: ['prefix=SCB-']
13
+ func: <function _convert at 0x7f25c8ebea20>
14
+
15
+ Error counts:
16
+ id: 0
17
+ smiles: 0
18
+ scaffold: 0
19
+ mwt: 0
20
+ clogp: 0
21
+ tpsa: 0
22
+ ⏰ Completed process in 0:22:46.552937