From a6f3797d76e4cdc59d7e5ec75c919bc411b80e4c Mon Sep 17 00:00:00 2001 From: Jalil Nourisa Date: Wed, 23 Jul 2025 19:29:11 +0400 Subject: [PATCH] scores are completed with trace --- results/grn/data/dataset_info.json | 52 + results/grn/data/method_info.json | 210 ++ results/grn/data/metric_execution_info.json | 548 ++++ results/grn/data/metric_info.json | 122 + results/grn/data/quality_control.json | 2592 +++++++++++++++++++ results/grn/data/results.json | 2134 +++++++++++++++ results/grn/data/state.yaml | 9 + results/grn/data/task_info.json | 50 + results/grn/index.qmd | 0 9 files changed, 5717 insertions(+) create mode 100644 results/grn/data/dataset_info.json create mode 100644 results/grn/data/method_info.json create mode 100644 results/grn/data/metric_execution_info.json create mode 100644 results/grn/data/metric_info.json create mode 100644 results/grn/data/quality_control.json create mode 100644 results/grn/data/results.json create mode 100644 results/grn/data/state.yaml create mode 100644 results/grn/data/task_info.json create mode 100644 results/grn/index.qmd diff --git a/results/grn/data/dataset_info.json b/results/grn/data/dataset_info.json new file mode 100644 index 00000000..a6e3cf89 --- /dev/null +++ b/results/grn/data/dataset_info.json @@ -0,0 +1,52 @@ +[ + { + "dataset_id": "op", + "dataset_name": "OPSCA", + "dataset_summary": "scRNA-seq data with 146 (originally) perturbations with chemical compounds on PBMCs. Multiome data available for the control compound.", + "dataset_description": "Novel single-cell perturbational dataset in human peripheral blood mononuclear cells (PBMCs). 144 compounds were selected from the Library of Integrated Network-Based Cellular Signatures (LINCS) Connectivity Map dataset (PMID: 29195078) and measured single-cell gene expression profiles after 24 hours of treatment. The experiment was repeated in three healthy human donors, and the compounds were selected based on diverse transcriptional signatures observed in CD34+ hematopoietic stem cells (data not released). This experiment was performed in human PBMCs because the cells are commercially available with pre-obtained consent for public release and PBMCs are a primary, disease-relevant tissue that contains multiple mature cell types (including T-cells, B-cells, myeloid cells, and NK cells) with established markers for annotation of cell types. To supplement this dataset, joint scRNA and single-cell chromatin accessibility measurements were measured from the baseline compound using the 10x Multiome assay. ", + "data_reference": null, + "data_url": null, + "date_created": "27-06-2025", + "file_size": 1032078876 + }, + { + "dataset_id": "nakatake", + "dataset_name": "Nakatake", + "dataset_summary": "RNA-seq data with 463 perturbations (overexpression) on SEES3 cells.", + "dataset_description": "Transcription factors (TFs) play a pivotal role in determining cell states, yet our understanding of the causative relationship between TFs and cell states is limited. Here, we systematically examine the state changes of human pluripotent embryonic stem cells (hESCs) by the large-scale manipulation of single TFs. We establish 2,135 hESC lines, representing three clones each of 714 doxycycline (Dox)-inducible genes including 481 TFs, and obtain 26,998 microscopic cell images and 2,174 transcriptome datasets-RNA sequencing (RNA-seq) or microarrays-48 h after the presence or absence of Dox. 
Interestingly, the expression of essentially all the genes, including genes located in heterochromatin regions, are perturbed by these TFs. TFs are also characterized by their ability to induce differentiation of hESCs into specific cell lineages. These analyses help to provide a way of classifying TFs and identifying specific sets of TFs for directing hESC differentiation into desired cell types.", + "data_reference": null, + "data_url": null, + "date_created": "22-07-2025", + "file_size": 283939072 + }, + { + "dataset_id": "norman", + "dataset_name": "Norman", + "dataset_summary": "Single cell RNA-seq data with 231 perturbations (activation) on K562 cells.", + "dataset_description": "How cellular and organismal complexity emerges from combinatorial expression of genes is a central question in biology. High-content phenotyping approaches such as Perturb-seq (single-cell RNA-seq pooled CRISPR screens) present an opportunity for exploring such genetic interactions (GIs) at scale. Here, we present an analytical framework for interpreting high-dimensional landscapes of cell states (manifolds) constructed from transcriptional phenotypes. We applied this approach to Perturb-seq profiling of strong GIs mined from a growth-based, gain-of-function GI map. Exploration of this manifold enabled ordering of regulatory pathways, principled classification of GIs (e.g. identifying suppressors), and mechanistic elucidation of synergistic interactions, including an unexpected synergy between CBL and CNN1 driving erythroid differentiation. Finally, we apply recommender system machine learning to predict interactions, facilitating exploration of vastly larger GI manifolds.", + "data_reference": null, + "data_url": null, + "date_created": "27-06-2025", + "file_size": 1066293309 + }, + { + "dataset_id": "replogle", + "dataset_name": "Replogle", + "dataset_summary": "Single cell RNA-seq data with 9722 perturbations (KO) on K562 cells.", + "dataset_description": "A central goal of genetics is to define the relationships between genotypes and phenotypes. High-content phenotypic screens such as Perturb-seq (CRISPR-based screens with single-cell RNA-sequencing readouts) enable massively parallel functional genomic mapping but, to date, have been used at limited scales. Here, we perform genome-scale Perturb-seq targeting all expressed genes with CRISPR interference (CRISPRi) across >2.5 million human cells. We use transcriptional phenotypes to predict the function of poorly characterized genes, uncovering new regulators of ribosome biogenesis (including CCDC86, ZNF236, and SPATA5L1), transcription (C7orf26), and mitochondrial respiration (TMEM242). In addition to assigning gene function, single-cell transcriptional phenotypes allow for in-depth dissection of complex cellular phenomena-from RNA processing to differentiation. We leverage this ability to systematically identify genetic drivers and consequences of aneuploidy and to discover an unanticipated layer of stress-specific regulation of the mitochondrial genome. 
Our information-rich genotype-phenotype map reveals a multidimensional portrait of gene and cellular function.", + "data_reference": null, + "data_url": null, + "date_created": "26-06-2025", + "file_size": 824850166 + }, + { + "dataset_id": "adamson", + "dataset_name": "Adamson", + "dataset_summary": "Single cell RNA-seq data with 82 perturbations (KD) on K562 cells.", + "dataset_description": "Functional genomics efforts face tradeoffs between number of perturbations examined and complexity of phenotypes measured. We bridge this gap with Perturb-seq, which combines droplet-based single-cell RNA-seq with a strategy for barcoding CRISPR-mediated perturbations, allowing many perturbations to be profiled in pooled format. We applied Perturb-seq to dissect the mammalian unfolded protein response (UPR) using single and combinatorial CRISPR perturbations. Two genome-scale CRISPR interference (CRISPRi) screens identified genes whose repression perturbs ER homeostasis. Subjecting ∼100 hits to Perturb-seq enabled high-precision functional clustering of genes. Single-cell analyses decoupled the three UPR branches, revealed bifurcated UPR branch activation among cells subject to the same perturbation, and uncovered differential activation of the branches across hits, including an isolated feedback loop between the translocon and IRE1α. These studies provide insight into how the three sensors of ER homeostasis monitor distinct types of stress and highlight the ability of Perturb-seq to dissect complex cellular responses.", + "data_reference": null, + "data_url": null, + "date_created": "27-06-2025", + "file_size": 815772353 + } +] diff --git a/results/grn/data/method_info.json b/results/grn/data/method_info.json new file mode 100644 index 00000000..03e7f374 --- /dev/null +++ b/results/grn/data/method_info.json @@ -0,0 +1,210 @@ +[ + { + "task_id": "control_methods", + "method_id": "pearson_corr", + "method_name": "pearson_corr", + "method_summary": "Baseline based on correlation", + "method_description": "Baseline GRN inference method using Pearson correlation.\n", + "is_baseline": true, + "references_doi": null, + "references_bibtex": null, + "code_url": "https://github.com/openproblems-bio/task_grn_inference", + "documentation_url": null, + "image": "https://ghcr.io/openproblems-bio/task_grn_inference/control_methods/pearson_corr:build_main", + "implementation_url": "https://github.com/openproblems-bio/task_grn_inference/blob/e75f19a1d4059fe10339c29aa170eccf4dad48e3/src/control_methods/pearson_corr", + "code_version": "build_main", + "commit_sha": "e75f19a1d4059fe10339c29aa170eccf4dad48e3" + }, + { + "task_id": "control_methods", + "method_id": "negative_control", + "method_name": "Negative control", + "method_summary": "Source-target links based on random assignment", + "method_description": "Randomly assigns regulatory links to tf-target links with a given tf and target list. 
This provides a near-random baseline.\n", + "is_baseline": true, + "references_doi": null, + "references_bibtex": null, + "code_url": "https://github.com/openproblems-bio/task_grn_inference", + "documentation_url": null, + "image": "https://ghcr.io/openproblems-bio/task_grn_inference/control_methods/negative_control:build_main", + "implementation_url": "https://github.com/openproblems-bio/task_grn_inference/blob/e75f19a1d4059fe10339c29aa170eccf4dad48e3/src/control_methods/negative_control", + "code_version": "build_main", + "commit_sha": "e75f19a1d4059fe10339c29aa170eccf4dad48e3" + }, + { + "task_id": "control_methods", + "method_id": "positive_control", + "method_name": "positive_control", + "method_summary": "Baseline based on correlation", + "method_description": "Baseline model based on Pearson correlation that uses both the inference and evaluation datasets to infer the GRN.\n", + "is_baseline": true, + "references_doi": null, + "references_bibtex": null, + "code_url": "https://github.com/openproblems-bio/task_grn_inference", + "documentation_url": null, + "image": "https://ghcr.io/openproblems-bio/task_grn_inference/control_methods/positive_control:build_main", + "implementation_url": "https://github.com/openproblems-bio/task_grn_inference/blob/e75f19a1d4059fe10339c29aa170eccf4dad48e3/src/control_methods/positive_control", + "code_version": "build_main", + "commit_sha": "e75f19a1d4059fe10339c29aa170eccf4dad48e3" + }, + { + "task_id": "grn_methods", + "method_id": "portia", + "method_name": "portia", + "method_summary": "GRN inference using PORTIA", + "method_description": "GRN inference using PORTIA.\n", + "is_baseline": false, + "references_doi": null, + "references_bibtex": null, + "code_url": "https://github.com/openproblems-bio/task_grn_inference", + "documentation_url": null, + "image": "https://ghcr.io/openproblems-bio/task_grn_inference/methods/single_omics/portia:build_main", + "implementation_url": "https://github.com/openproblems-bio/task_grn_inference/blob/e75f19a1d4059fe10339c29aa170eccf4dad48e3/src/methods/single_omics/portia", + "code_version": "build_main", + "commit_sha": "e75f19a1d4059fe10339c29aa170eccf4dad48e3" + }, + { + "task_id": "grn_methods", + "method_id": "ppcor", + "method_name": "ppcor", + "method_summary": "GRN inference using PPCOR", + "method_description": "GRN inference using PPCOR.\n", + "is_baseline": false, + "references_doi": null, + "references_bibtex": null, + "code_url": "https://github.com/openproblems-bio/task_grn_inference", + "documentation_url": null, + "image": "https://ghcr.io/openproblems-bio/task_grn_inference/methods/single_omics/ppcor:build_main", + "implementation_url": "https://github.com/openproblems-bio/task_grn_inference/blob/e75f19a1d4059fe10339c29aa170eccf4dad48e3/src/methods/single_omics/ppcor", + "code_version": "build_main", + "commit_sha": "e75f19a1d4059fe10339c29aa170eccf4dad48e3" + }, + { + "task_id": "grn_methods", + "method_id": "scenic", + "method_name": "scenic", + "method_summary": "GRN inference using scenic", + "method_description": "GRN inference using the SCENIC pipeline.\n", + "is_baseline": false, + "references_doi": null, + "references_bibtex": null, + "code_url": "https://github.com/openproblems-bio/task_grn_inference", + "documentation_url": null, + "image": "https://ghcr.io/openproblems-bio/task_grn_inference/methods/single_omics/scenic:build_main", + "implementation_url": "https://github.com/openproblems-bio/task_grn_inference/blob/e75f19a1d4059fe10339c29aa170eccf4dad48e3/src/methods/single_omics/scenic", +
"code_version": "build_main", + "commit_sha": "e75f19a1d4059fe10339c29aa170eccf4dad48e3" + }, + { + "task_id": "grn_methods", + "method_id": "scenicplus", + "method_name": "scenicplus", + "method_summary": "GRN inference using scenicplus", + "method_description": "GRN inference using scenicplus.\n", + "is_baseline": false, + "references_doi": null, + "references_bibtex": null, + "code_url": "https://github.com/openproblems-bio/task_grn_inference", + "documentation_url": null, + "image": "https://ghcr.io/openproblems-bio/task_grn_inference/methods/multi_omics/scenicplus:build_main", + "implementation_url": "https://github.com/openproblems-bio/task_grn_inference/blob/e75f19a1d4059fe10339c29aa170eccf4dad48e3/src/methods/multi_omics/scenicplus", + "code_version": "build_main", + "commit_sha": "e75f19a1d4059fe10339c29aa170eccf4dad48e3" + }, + { + "task_id": "grn_methods", + "method_id": "scprint", + "method_name": "scprint", + "method_summary": "GRN inference using scPRINT", + "method_description": "GRN inference using scPRINT.\n", + "is_baseline": false, + "references_doi": null, + "references_bibtex": null, + "code_url": "https://github.com/openproblems-bio/task_grn_inference", + "documentation_url": null, + "image": "https://ghcr.io/openproblems-bio/task_grn_inference/methods/single_omics/scprint:build_main", + "implementation_url": "https://github.com/openproblems-bio/task_grn_inference/blob/e75f19a1d4059fe10339c29aa170eccf4dad48e3/src/methods/single_omics/scprint", + "code_version": "build_main", + "commit_sha": "e75f19a1d4059fe10339c29aa170eccf4dad48e3" + }, + { + "task_id": "grn_methods", + "method_id": "grnboost", + "method_name": "grnboost", + "method_summary": "GRN inference using GRNBoost2", + "method_description": "GRN inference using GRNBoost2.\n", + "is_baseline": false, + "references_doi": null, + "references_bibtex": null, + "code_url": "https://github.com/openproblems-bio/task_grn_inference", + "documentation_url": null, + "image": "https://ghcr.io/openproblems-bio/task_grn_inference/methods/single_omics/grnboost:build_main", + "implementation_url": "https://github.com/openproblems-bio/task_grn_inference/blob/e75f19a1d4059fe10339c29aa170eccf4dad48e3/src/methods/single_omics/grnboost", + "code_version": "build_main", + "commit_sha": "e75f19a1d4059fe10339c29aa170eccf4dad48e3" + }, + { + "task_id": "grn_methods", + "method_id": "scglue", + "method_name": "scglue", + "method_summary": "GRN inference using scglue", + "method_description": "GRN inference using scglue. 
\n", + "is_baseline": false, + "references_doi": null, + "references_bibtex": null, + "code_url": "https://github.com/openproblems-bio/task_grn_inference", + "documentation_url": null, + "image": "https://ghcr.io/openproblems-bio/task_grn_inference/methods/multi_omics/scglue:build_main", + "implementation_url": "https://github.com/openproblems-bio/task_grn_inference/blob/e75f19a1d4059fe10339c29aa170eccf4dad48e3/src/methods/multi_omics/scglue", + "code_version": "build_main", + "commit_sha": "e75f19a1d4059fe10339c29aa170eccf4dad48e3" + }, + { + "task_id": "grn_methods", + "method_id": "granie", + "method_name": "granie", + "method_summary": "GRN inference using GRaNIE", + "method_description": "GRN inference using GRaNIE\n", + "is_baseline": false, + "references_doi": null, + "references_bibtex": null, + "code_url": "https://github.com/openproblems-bio/task_grn_inference", + "documentation_url": null, + "image": "https://ghcr.io/openproblems-bio/task_grn_inference/methods/multi_omics/granie:build_main", + "implementation_url": "https://github.com/openproblems-bio/task_grn_inference/blob/e75f19a1d4059fe10339c29aa170eccf4dad48e3/src/methods/multi_omics/granie", + "code_version": "build_main", + "commit_sha": "e75f19a1d4059fe10339c29aa170eccf4dad48e3" + }, + { + "task_id": "grn_methods", + "method_id": "figr", + "method_name": "figr", + "method_summary": "GRN inference using figr", + "method_description": "GRN inference using figr.\n", + "is_baseline": false, + "references_doi": null, + "references_bibtex": null, + "code_url": "https://github.com/openproblems-bio/task_grn_inference", + "documentation_url": null, + "image": "https://ghcr.io/openproblems-bio/task_grn_inference/methods/multi_omics/figr:build_main", + "implementation_url": "https://github.com/openproblems-bio/task_grn_inference/blob/e75f19a1d4059fe10339c29aa170eccf4dad48e3/src/methods/multi_omics/figr", + "code_version": "build_main", + "commit_sha": "e75f19a1d4059fe10339c29aa170eccf4dad48e3" + }, + { + "task_id": "grn_methods", + "method_id": "celloracle", + "method_name": "celloracle", + "method_summary": "GRN inference using celloracle", + "method_description": "GRN inference using celloracle.\n", + "is_baseline": false, + "references_doi": null, + "references_bibtex": null, + "code_url": "https://github.com/openproblems-bio/task_grn_inference", + "documentation_url": "https://morris-lab.github.io/CellOracle.documentation/", + "image": "https://ghcr.io/openproblems-bio/task_grn_inference/methods/multi_omics/celloracle:build_main", + "implementation_url": "https://github.com/openproblems-bio/task_grn_inference/blob/e75f19a1d4059fe10339c29aa170eccf4dad48e3/src/methods/multi_omics/celloracle", + "code_version": "build_main", + "commit_sha": "e75f19a1d4059fe10339c29aa170eccf4dad48e3" + } +] diff --git a/results/grn/data/metric_execution_info.json b/results/grn/data/metric_execution_info.json new file mode 100644 index 00000000..019229ec --- /dev/null +++ b/results/grn/data/metric_execution_info.json @@ -0,0 +1,548 @@ +[ + { + "dataset_id": null, + "method_id": "celloracle", + "metric_component_name": "regression_1", + "resources": { + "submit": "2025-06-27 15:25:09", + "exit_code": 0, + "duration_sec": 612, + "cpu_pct": 142.7, + "peak_memory_mb": 33280, + "disk_read_mb": 3892, + "disk_write_mb": 2 + } + }, + { + "dataset_id": null, + "method_id": "celloracle", + "metric_component_name": "regression_2", + "resources": { + "submit": "2025-06-27 15:25:09", + "exit_code": 0, + "duration_sec": 1020, + "cpu_pct": 4042.9, + 
"peak_memory_mb": 11367, + "disk_read_mb": 5838, + "disk_write_mb": 3 + } + }, + { + "dataset_id": null, + "method_id": "celloracle", + "metric_component_name": "ws_distance", + "resources": { + "submit": "2025-06-27 15:25:09", + "exit_code": 0, + "duration_sec": 5.1, + "cpu_pct": 618, + "peak_memory_mb": 2151, + "disk_read_mb": 72, + "disk_write_mb": 3 + } + }, + { + "dataset_id": null, + "method_id": "figr", + "metric_component_name": "regression_1", + "resources": { + "submit": "2025-06-27 21:52:30", + "exit_code": 0, + "duration_sec": 598, + "cpu_pct": 131.9, + "peak_memory_mb": 33280, + "disk_read_mb": 4096, + "disk_write_mb": 2 + } + }, + { + "dataset_id": null, + "method_id": "figr", + "metric_component_name": "regression_2", + "resources": { + "submit": "2025-06-27 21:52:30", + "exit_code": 0, + "duration_sec": 1041, + "cpu_pct": 4875.7, + "peak_memory_mb": 12698, + "disk_read_mb": 6144, + "disk_write_mb": 3 + } + }, + { + "dataset_id": null, + "method_id": "figr", + "metric_component_name": "ws_distance", + "resources": { + "submit": "2025-06-27 21:52:30", + "exit_code": 0, + "duration_sec": 6.9, + "cpu_pct": 462.1, + "peak_memory_mb": 4301, + "disk_read_mb": 192, + "disk_write_mb": 3 + } + }, + { + "dataset_id": null, + "method_id": "granie", + "metric_component_name": "regression_1", + "resources": { + "submit": "2025-06-27 12:49:09", + "exit_code": 0, + "duration_sec": 706, + "cpu_pct": 136.5, + "peak_memory_mb": 33485, + "disk_read_mb": 3892, + "disk_write_mb": 2 + } + }, + { + "dataset_id": null, + "method_id": "granie", + "metric_component_name": "regression_2", + "resources": { + "submit": "2025-06-27 12:49:09", + "exit_code": 0, + "duration_sec": 1002, + "cpu_pct": 5376.6, + "peak_memory_mb": 12698, + "disk_read_mb": 5838, + "disk_write_mb": 3 + } + }, + { + "dataset_id": null, + "method_id": "granie", + "metric_component_name": "ws_distance", + "resources": { + "submit": "2025-06-27 12:49:09", + "exit_code": 0, + "duration_sec": 5.7, + "cpu_pct": 458.7, + "peak_memory_mb": 2151, + "disk_read_mb": 84, + "disk_write_mb": 3 + } + }, + { + "dataset_id": null, + "method_id": "grnboost", + "metric_component_name": "regression_1", + "resources": { + "submit": "2025-06-26 19:19:45", + "exit_code": 0, + "duration_sec": 1580.8, + "cpu_pct": 175.3524, + "peak_memory_mb": 33690, + "disk_read_mb": 5674, + "disk_write_mb": 10 + } + }, + { + "dataset_id": null, + "method_id": "grnboost", + "metric_component_name": "regression_2", + "resources": { + "submit": "2025-06-26 19:19:45", + "exit_code": 0, + "duration_sec": 4014, + "cpu_pct": 2511.6221, + "peak_memory_mb": 18535, + "disk_read_mb": 8523, + "disk_write_mb": 15 + } + }, + { + "dataset_id": null, + "method_id": "grnboost", + "metric_component_name": "ws_distance", + "resources": { + "submit": "2025-06-26 19:19:45", + "exit_code": 0, + "duration_sec": 1926.9, + "cpu_pct": 103.0815, + "peak_memory_mb": 16282, + "disk_read_mb": 66432, + "disk_write_mb": 12 + } + }, + { + "dataset_id": null, + "method_id": "negative_control", + "metric_component_name": "regression_1", + "resources": { + "submit": "2025-06-26 14:00:15", + "exit_code": 0, + "duration_sec": 1741.6, + "cpu_pct": 152.4172, + "peak_memory_mb": 33997, + "disk_read_mb": 5662, + "disk_write_mb": 10 + } + }, + { + "dataset_id": null, + "method_id": "negative_control", + "metric_component_name": "regression_2", + "resources": { + "submit": "2025-06-26 14:00:15", + "exit_code": 0, + "duration_sec": 3876, + "cpu_pct": 2514.7477, + "peak_memory_mb": 18432, + "disk_read_mb": 8499, + 
"disk_write_mb": 15 + } + }, + { + "dataset_id": null, + "method_id": "negative_control", + "metric_component_name": "ws_distance", + "resources": { + "submit": "2025-06-26 14:00:15", + "exit_code": 0, + "duration_sec": 514.2, + "cpu_pct": 115.0684, + "peak_memory_mb": 16282, + "disk_read_mb": 66429, + "disk_write_mb": 12 + } + }, + { + "dataset_id": null, + "method_id": "pearson_corr", + "metric_component_name": "regression_1", + "resources": { + "submit": "2025-06-26 14:00:45", + "exit_code": 0, + "duration_sec": 1409.4, + "cpu_pct": 156.4501, + "peak_memory_mb": 33178, + "disk_read_mb": 5674, + "disk_write_mb": 10 + } + }, + { + "dataset_id": null, + "method_id": "pearson_corr", + "metric_component_name": "regression_2", + "resources": { + "submit": "2025-06-26 14:00:45", + "exit_code": 0, + "duration_sec": 4929, + "cpu_pct": 2029.7679, + "peak_memory_mb": 21197, + "disk_read_mb": 8523, + "disk_write_mb": 15 + } + }, + { + "dataset_id": null, + "method_id": "pearson_corr", + "metric_component_name": "ws_distance", + "resources": { + "submit": "2025-06-26 14:00:44", + "exit_code": 0, + "duration_sec": 1271.4, + "cpu_pct": 105.3735, + "peak_memory_mb": 16282, + "disk_read_mb": 66432, + "disk_write_mb": 12 + } + }, + { + "dataset_id": null, + "method_id": "portia", + "metric_component_name": "regression_1", + "resources": { + "submit": "2025-06-26 14:09:34", + "exit_code": 0, + "duration_sec": 1627.2, + "cpu_pct": 155.0732, + "peak_memory_mb": 33792, + "disk_read_mb": 5678, + "disk_write_mb": 10 + } + }, + { + "dataset_id": null, + "method_id": "portia", + "metric_component_name": "regression_2", + "resources": { + "submit": "2025-06-26 14:09:35", + "exit_code": 0, + "duration_sec": 3906, + "cpu_pct": 2457.7826, + "peak_memory_mb": 18432, + "disk_read_mb": 8523, + "disk_write_mb": 15 + } + }, + { + "dataset_id": null, + "method_id": "portia", + "metric_component_name": "ws_distance", + "resources": { + "submit": "2025-06-26 14:09:35", + "exit_code": 0, + "duration_sec": 2526.3, + "cpu_pct": 102.461, + "peak_memory_mb": 16282, + "disk_read_mb": 66432, + "disk_write_mb": 12 + } + }, + { + "dataset_id": null, + "method_id": "positive_control", + "metric_component_name": "regression_1", + "resources": { + "submit": "2025-06-26 14:01:15", + "exit_code": 0, + "duration_sec": 1389.2, + "cpu_pct": 149.7781, + "peak_memory_mb": 33178, + "disk_read_mb": 5674, + "disk_write_mb": 10 + } + }, + { + "dataset_id": null, + "method_id": "positive_control", + "metric_component_name": "regression_2", + "resources": { + "submit": "2025-06-26 14:01:15", + "exit_code": 0, + "duration_sec": 4851, + "cpu_pct": 2026.2121, + "peak_memory_mb": 21197, + "disk_read_mb": 8523, + "disk_write_mb": 15 + } + }, + { + "dataset_id": null, + "method_id": "positive_control", + "metric_component_name": "ws_distance", + "resources": { + "submit": "2025-06-26 14:01:15", + "exit_code": 0, + "duration_sec": 1274.4, + "cpu_pct": 104.0625, + "peak_memory_mb": 16282, + "disk_read_mb": 66432, + "disk_write_mb": 12 + } + }, + { + "dataset_id": null, + "method_id": "ppcor", + "metric_component_name": "regression_1", + "resources": { + "submit": "2025-06-26 15:08:25", + "exit_code": 0, + "duration_sec": 1389.4, + "cpu_pct": 163.4842, + "peak_memory_mb": 32768, + "disk_read_mb": 4832, + "disk_write_mb": 8 + } + }, + { + "dataset_id": null, + "method_id": "ppcor", + "metric_component_name": "regression_2", + "resources": { + "submit": "2025-06-26 15:08:25", + "exit_code": 0, + "duration_sec": 2484, + "cpu_pct": 3261.3711, + 
"peak_memory_mb": 11367, + "disk_read_mb": 7248, + "disk_write_mb": 12 + } + }, + { + "dataset_id": null, + "method_id": "ppcor", + "metric_component_name": "ws_distance", + "resources": { + "submit": "2025-06-26 15:08:25", + "exit_code": 0, + "duration_sec": 2495.7, + "cpu_pct": 103.5303, + "peak_memory_mb": 16282, + "disk_read_mb": 66117, + "disk_write_mb": 12 + } + }, + { + "dataset_id": null, + "method_id": "scenic", + "metric_component_name": "regression_1", + "resources": { + "submit": "2025-06-27 05:19:56", + "exit_code": 0, + "duration_sec": 1547.8, + "cpu_pct": 136.4555, + "peak_memory_mb": 32154, + "disk_read_mb": 5614, + "disk_write_mb": 10 + } + }, + { + "dataset_id": null, + "method_id": "scenic", + "metric_component_name": "regression_2", + "resources": { + "submit": "2025-06-27 05:19:56", + "exit_code": 0, + "duration_sec": 3918, + "cpu_pct": 2556.3238, + "peak_memory_mb": 18432, + "disk_read_mb": 8427, + "disk_write_mb": 15 + } + }, + { + "dataset_id": null, + "method_id": "scenic", + "metric_component_name": "ws_distance", + "resources": { + "submit": "2025-06-27 05:19:56", + "exit_code": 0, + "duration_sec": 594.9, + "cpu_pct": 113.3731, + "peak_memory_mb": 16282, + "disk_read_mb": 65811, + "disk_write_mb": 12 + } + }, + { + "dataset_id": null, + "method_id": "scenicplus", + "metric_component_name": "regression_1", + "resources": { + "submit": "2025-06-28 07:03:51", + "exit_code": 0, + "duration_sec": 670, + "cpu_pct": 120.4, + "peak_memory_mb": 32052, + "disk_read_mb": 3892, + "disk_write_mb": 2 + } + }, + { + "dataset_id": null, + "method_id": "scenicplus", + "metric_component_name": "regression_2", + "resources": { + "submit": "2025-06-28 07:03:51", + "exit_code": 0, + "duration_sec": 1026, + "cpu_pct": 5425.1, + "peak_memory_mb": 12698, + "disk_read_mb": 5838, + "disk_write_mb": 3 + } + }, + { + "dataset_id": null, + "method_id": "scenicplus", + "metric_component_name": "ws_distance", + "resources": { + "submit": "2025-06-28 07:03:51", + "exit_code": 0, + "duration_sec": 4.8, + "cpu_pct": 637.3, + "peak_memory_mb": 2151, + "disk_read_mb": 66, + "disk_write_mb": 3 + } + }, + { + "dataset_id": null, + "method_id": "scglue", + "metric_component_name": "regression_1", + "resources": { + "submit": "2025-06-27 14:31:49", + "exit_code": 0, + "duration_sec": 610, + "cpu_pct": 129.7, + "peak_memory_mb": 33076, + "disk_read_mb": 3892, + "disk_write_mb": 2 + } + }, + { + "dataset_id": null, + "method_id": "scglue", + "metric_component_name": "regression_2", + "resources": { + "submit": "2025-06-27 14:31:49", + "exit_code": 0, + "duration_sec": 1029, + "cpu_pct": 4005.3, + "peak_memory_mb": 11264, + "disk_read_mb": 5838, + "disk_write_mb": 3 + } + }, + { + "dataset_id": null, + "method_id": "scglue", + "metric_component_name": "ws_distance", + "resources": { + "submit": "2025-06-27 14:31:49", + "exit_code": 0, + "duration_sec": 5.1, + "cpu_pct": 626.6, + "peak_memory_mb": 2151, + "disk_read_mb": 54, + "disk_write_mb": 3 + } + }, + { + "dataset_id": null, + "method_id": "scprint", + "metric_component_name": "regression_1", + "resources": { + "submit": "2025-06-27 06:57:41", + "exit_code": 0, + "duration_sec": 781, + "cpu_pct": 134.5572, + "peak_memory_mb": 33485, + "disk_read_mb": 4452, + "disk_write_mb": 4 + } + }, + { + "dataset_id": null, + "method_id": "scprint", + "metric_component_name": "regression_2", + "resources": { + "submit": "2025-06-27 06:57:41", + "exit_code": 0, + "duration_sec": 1287, + "cpu_pct": 4256.307, + "peak_memory_mb": 12698, + "disk_read_mb": 6678, + 
"disk_write_mb": 6 + } + }, + { + "dataset_id": null, + "method_id": "scprint", + "metric_component_name": "ws_distance", + "resources": { + "submit": "2025-06-27 06:57:41", + "exit_code": 0, + "duration_sec": 29.7, + "cpu_pct": 240.3636, + "peak_memory_mb": 7578, + "disk_read_mb": 6723, + "disk_write_mb": 6 + } + } +] diff --git a/results/grn/data/metric_info.json b/results/grn/data/metric_info.json new file mode 100644 index 00000000..d84eb02b --- /dev/null +++ b/results/grn/data/metric_info.json @@ -0,0 +1,122 @@ +[ + { + "task_id": "metrics", + "component_name": "regression_1", + "metric_id": "r1_all", + "metric_name": "R1 (all)", + "metric_summary": "Regression 1 score for all genes with mean gene expression set for missing genes", + "metric_description": "Regression 1 score for all genes with mean gene expression set for missing genes\n", + "references_doi": null, + "references_bibtex": null, + "implementation_url": "https://github.com/openproblems-bio/task_grn_inference/blob/e75f19a1d4059fe10339c29aa170eccf4dad48e3/src/metrics/regression_1", + "image": "https://ghcr.io/openproblems-bio/task_grn_inference/metrics/regression_1:build_main", + "code_version": "build_main", + "commit_sha": "e75f19a1d4059fe10339c29aa170eccf4dad48e3", + "maximize": true + }, + { + "task_id": "metrics", + "component_name": "regression_1", + "metric_id": "r1_grn", + "metric_name": "R1 (grn)", + "metric_summary": "Regression 1 score for only genes in the network", + "metric_description": "Regression 1 score for only genes in the network\n", + "references_doi": null, + "references_bibtex": null, + "implementation_url": "https://github.com/openproblems-bio/task_grn_inference/blob/e75f19a1d4059fe10339c29aa170eccf4dad48e3/src/metrics/regression_1", + "image": "https://ghcr.io/openproblems-bio/task_grn_inference/metrics/regression_1:build_main", + "code_version": "build_main", + "commit_sha": "e75f19a1d4059fe10339c29aa170eccf4dad48e3", + "maximize": true + }, + { + "task_id": "metrics", + "component_name": "regression_2", + "metric_id": "r2-theta-0.0", + "metric_name": "R2 (precision)", + "metric_summary": "Captures the perfomance for the top regulatory links", + "metric_description": "Captures the perfomance for the top regulatory links\n", + "references_doi": null, + "references_bibtex": null, + "implementation_url": "https://github.com/openproblems-bio/task_grn_inference/blob/e75f19a1d4059fe10339c29aa170eccf4dad48e3/src/metrics/regression_2", + "image": "https://ghcr.io/openproblems-bio/task_grn_inference/metrics/regression_2:build_main", + "code_version": "build_main", + "commit_sha": "e75f19a1d4059fe10339c29aa170eccf4dad48e3", + "maximize": true + }, + { + "task_id": "metrics", + "component_name": "regression_2", + "metric_id": "r2-theta-0.5", + "metric_name": "R2 (balanced)", + "metric_summary": "Balanced performance scores considering both prevision and recall", + "metric_description": "Balanced performance scores considering both prevision and recall\n", + "references_doi": null, + "references_bibtex": null, + "implementation_url": "https://github.com/openproblems-bio/task_grn_inference/blob/e75f19a1d4059fe10339c29aa170eccf4dad48e3/src/metrics/regression_2", + "image": "https://ghcr.io/openproblems-bio/task_grn_inference/metrics/regression_2:build_main", + "code_version": "build_main", + "commit_sha": "e75f19a1d4059fe10339c29aa170eccf4dad48e3", + "maximize": true + }, + { + "task_id": "metrics", + "component_name": "regression_2", + "metric_id": "r2-theta-1.0", + "metric_name": "R2 (recall)", + 
"metric_summary": "Captures the perfomance for the more broad regulatory links (recall)", + "metric_description": "Captures the perfomance for the more broad regulatory links (recall)\n", + "references_doi": null, + "references_bibtex": null, + "implementation_url": "https://github.com/openproblems-bio/task_grn_inference/blob/e75f19a1d4059fe10339c29aa170eccf4dad48e3/src/metrics/regression_2", + "image": "https://ghcr.io/openproblems-bio/task_grn_inference/metrics/regression_2:build_main", + "code_version": "build_main", + "commit_sha": "e75f19a1d4059fe10339c29aa170eccf4dad48e3", + "maximize": true + }, + { + "task_id": "metrics", + "component_name": "ws_distance", + "metric_id": "ws-theta-0.0", + "metric_name": "WS (precision)", + "metric_summary": "Captures the perfomance for the top regulatory links", + "metric_description": "Captures the perfomance for the top regulatory links\n", + "references_doi": null, + "references_bibtex": null, + "implementation_url": "https://github.com/openproblems-bio/task_grn_inference/blob/e75f19a1d4059fe10339c29aa170eccf4dad48e3/src/metrics/ws_distance", + "image": "https://ghcr.io/openproblems-bio/task_grn_inference/metrics/ws_distance:build_main", + "code_version": "build_main", + "commit_sha": "e75f19a1d4059fe10339c29aa170eccf4dad48e3", + "maximize": true + }, + { + "task_id": "metrics", + "component_name": "ws_distance", + "metric_id": "ws-theta-0.5", + "metric_name": "WS (balanced)", + "metric_summary": "Balanced performance scores considering both prevision and recall", + "metric_description": "Balanced performance scores considering both prevision and recall\n", + "references_doi": null, + "references_bibtex": null, + "implementation_url": "https://github.com/openproblems-bio/task_grn_inference/blob/e75f19a1d4059fe10339c29aa170eccf4dad48e3/src/metrics/ws_distance", + "image": "https://ghcr.io/openproblems-bio/task_grn_inference/metrics/ws_distance:build_main", + "code_version": "build_main", + "commit_sha": "e75f19a1d4059fe10339c29aa170eccf4dad48e3", + "maximize": true + }, + { + "task_id": "metrics", + "component_name": "ws_distance", + "metric_id": "ws-theta-1.0", + "metric_name": "WS (recall)", + "metric_summary": "Captures the perfomance for the more broad regulatory links (recall)", + "metric_description": "Captures the perfomance for the more broad regulatory links (recall)\n", + "references_doi": null, + "references_bibtex": null, + "implementation_url": "https://github.com/openproblems-bio/task_grn_inference/blob/e75f19a1d4059fe10339c29aa170eccf4dad48e3/src/metrics/ws_distance", + "image": "https://ghcr.io/openproblems-bio/task_grn_inference/metrics/ws_distance:build_main", + "code_version": "build_main", + "commit_sha": "e75f19a1d4059fe10339c29aa170eccf4dad48e3", + "maximize": true + } +] diff --git a/results/grn/data/quality_control.json b/results/grn/data/quality_control.json new file mode 100644 index 00000000..70dc41b3 --- /dev/null +++ b/results/grn/data/quality_control.json @@ -0,0 +1,2592 @@ +[ + { + "task_id": "task_grn_inference", + "category": "Task info", + "name": "Pct 'task_id' missing", + "value": 0.0, + "severity": 0, + "severity_value": 0.0, + "code": "percent_missing([task_info], field)", + "message": "Task metadata field 'task_id' should be defined\n Task id: task_grn_inference\n Field: task_id\n" + }, + { + "task_id": "task_grn_inference", + "category": "Task info", + "name": "Pct 'task_name' missing", + "value": 0.0, + "severity": 0, + "severity_value": 0.0, + "code": "percent_missing([task_info], field)", + "message": 
"Task metadata field 'task_name' should be defined\n Task id: task_grn_inference\n Field: task_name\n" + }, + { + "task_id": "task_grn_inference", + "category": "Task info", + "name": "Pct 'task_summary' missing", + "value": 0.0, + "severity": 0, + "severity_value": 0.0, + "code": "percent_missing([task_info], field)", + "message": "Task metadata field 'task_summary' should be defined\n Task id: task_grn_inference\n Field: task_summary\n" + }, + { + "task_id": "task_grn_inference", + "category": "Task info", + "name": "Pct 'task_description' missing", + "value": 0.0, + "severity": 0, + "severity_value": 0.0, + "code": "percent_missing([task_info], field)", + "message": "Task metadata field 'task_description' should be defined\n Task id: task_grn_inference\n Field: task_description\n" + }, + { + "task_id": "task_grn_inference", + "category": "Method info", + "name": "Pct 'task_id' missing", + "value": 0.0, + "severity": 0, + "severity_value": 0.0, + "code": "percent_missing(method_info, field)", + "message": "Method metadata field 'task_id' should be defined\n Task id: task_grn_inference\n Field: task_id\n" + }, + { + "task_id": "task_grn_inference", + "category": "Method info", + "name": "Pct 'commit_sha' missing", + "value": 0.0, + "severity": 0, + "severity_value": 0.0, + "code": "percent_missing(method_info, field)", + "message": "Method metadata field 'commit_sha' should be defined\n Task id: task_grn_inference\n Field: commit_sha\n" + }, + { + "task_id": "task_grn_inference", + "category": "Method info", + "name": "Pct 'method_id' missing", + "value": 0.0, + "severity": 0, + "severity_value": 0.0, + "code": "percent_missing(method_info, field)", + "message": "Method metadata field 'method_id' should be defined\n Task id: task_grn_inference\n Field: method_id\n" + }, + { + "task_id": "task_grn_inference", + "category": "Method info", + "name": "Pct 'method_name' missing", + "value": 0.0, + "severity": 0, + "severity_value": 0.0, + "code": "percent_missing(method_info, field)", + "message": "Method metadata field 'method_name' should be defined\n Task id: task_grn_inference\n Field: method_name\n" + }, + { + "task_id": "task_grn_inference", + "category": "Method info", + "name": "Pct 'method_summary' missing", + "value": 0.0, + "severity": 0, + "severity_value": 0.0, + "code": "percent_missing(method_info, field)", + "message": "Method metadata field 'method_summary' should be defined\n Task id: task_grn_inference\n Field: method_summary\n" + }, + { + "task_id": "task_grn_inference", + "category": "Method info", + "name": "Pct 'paper_reference' missing", + "value": 0.7692307692307693, + "severity": 2, + "severity_value": 3.0, + "code": "percent_missing(method_info, field)", + "message": "Method metadata field 'paper_reference' should be defined\n Task id: task_grn_inference\n Field: paper_reference\n" + }, + { + "task_id": "task_grn_inference", + "category": "Method info", + "name": "Pct 'is_baseline' missing", + "value": 0.0, + "severity": 0, + "severity_value": 0.0, + "code": "percent_missing(method_info, field)", + "message": "Method metadata field 'is_baseline' should be defined\n Task id: task_grn_inference\n Field: is_baseline\n" + }, + { + "task_id": "task_grn_inference", + "category": "Metric info", + "name": "Pct 'task_id' missing", + "value": 0.0, + "severity": 0, + "severity_value": 0.0, + "code": "percent_missing(metric_info, field)", + "message": "Metric metadata field 'task_id' should be defined\n Task id: task_grn_inference\n Field: task_id\n" + }, + { + "task_id": 
"task_grn_inference", + "category": "Metric info", + "name": "Pct 'commit_sha' missing", + "value": 0.0, + "severity": 0, + "severity_value": 0.0, + "code": "percent_missing(metric_info, field)", + "message": "Metric metadata field 'commit_sha' should be defined\n Task id: task_grn_inference\n Field: commit_sha\n" + }, + { + "task_id": "task_grn_inference", + "category": "Metric info", + "name": "Pct 'metric_id' missing", + "value": 0.0, + "severity": 0, + "severity_value": 0.0, + "code": "percent_missing(metric_info, field)", + "message": "Metric metadata field 'metric_id' should be defined\n Task id: task_grn_inference\n Field: metric_id\n" + }, + { + "task_id": "task_grn_inference", + "category": "Metric info", + "name": "Pct 'metric_name' missing", + "value": 0.0, + "severity": 0, + "severity_value": 0.0, + "code": "percent_missing(metric_info, field)", + "message": "Metric metadata field 'metric_name' should be defined\n Task id: task_grn_inference\n Field: metric_name\n" + }, + { + "task_id": "task_grn_inference", + "category": "Metric info", + "name": "Pct 'metric_summary' missing", + "value": 0.0, + "severity": 0, + "severity_value": 0.0, + "code": "percent_missing(metric_info, field)", + "message": "Metric metadata field 'metric_summary' should be defined\n Task id: task_grn_inference\n Field: metric_summary\n" + }, + { + "task_id": "task_grn_inference", + "category": "Metric info", + "name": "Pct 'paper_reference' missing", + "value": 1.0, + "severity": 2, + "severity_value": 3.0, + "code": "percent_missing(metric_info, field)", + "message": "Metric metadata field 'paper_reference' should be defined\n Task id: task_grn_inference\n Field: paper_reference\n" + }, + { + "task_id": "task_grn_inference", + "category": "Metric info", + "name": "Pct 'maximize' missing", + "value": 0.0, + "severity": 0, + "severity_value": 0.0, + "code": "percent_missing(metric_info, field)", + "message": "Metric metadata field 'maximize' should be defined\n Task id: task_grn_inference\n Field: maximize\n" + }, + { + "task_id": "task_grn_inference", + "category": "Dataset info", + "name": "Pct 'task_id' missing", + "value": 1.0, + "severity": 2, + "severity_value": 3.0, + "code": "percent_missing(dataset_info, field)", + "message": "Dataset metadata field 'task_id' should be defined\n Task id: task_grn_inference\n Field: task_id\n" + }, + { + "task_id": "task_grn_inference", + "category": "Dataset info", + "name": "Pct 'dataset_id' missing", + "value": 0.0, + "severity": 0, + "severity_value": 0.0, + "code": "percent_missing(dataset_info, field)", + "message": "Dataset metadata field 'dataset_id' should be defined\n Task id: task_grn_inference\n Field: dataset_id\n" + }, + { + "task_id": "task_grn_inference", + "category": "Dataset info", + "name": "Pct 'dataset_name' missing", + "value": 0.0, + "severity": 0, + "severity_value": 0.0, + "code": "percent_missing(dataset_info, field)", + "message": "Dataset metadata field 'dataset_name' should be defined\n Task id: task_grn_inference\n Field: dataset_name\n" + }, + { + "task_id": "task_grn_inference", + "category": "Dataset info", + "name": "Pct 'dataset_summary' missing", + "value": 0.0, + "severity": 0, + "severity_value": 0.0, + "code": "percent_missing(dataset_info, field)", + "message": "Dataset metadata field 'dataset_summary' should be defined\n Task id: task_grn_inference\n Field: dataset_summary\n" + }, + { + "task_id": "task_grn_inference", + "category": "Dataset info", + "name": "Pct 'data_reference' missing", + "value": 1.0, + "severity": 2, + 
"severity_value": 3.0, + "code": "percent_missing(dataset_info, field)", + "message": "Dataset metadata field 'data_reference' should be defined\n Task id: task_grn_inference\n Field: data_reference\n" + }, + { + "task_id": "task_grn_inference", + "category": "Dataset info", + "name": "Pct 'data_url' missing", + "value": 1.0, + "severity": 2, + "severity_value": 3.0, + "code": "percent_missing(dataset_info, field)", + "message": "Dataset metadata field 'data_url' should be defined\n Task id: task_grn_inference\n Field: data_url\n" + }, + { + "task_id": "task_grn_inference", + "category": "Raw data", + "name": "Number of results", + "value": 78, + "severity": 0, + "severity_value": -1.9999999999999996, + "code": "len(results) == len(method_info) * len(metric_info) * len(dataset_info)", + "message": "Number of results should be equal to #methods × #metrics × #datasets.\n Task id: task_grn_inference\n Number of results: 78\n Number of methods: 13\n Number of metrics: 8\n Number of datasets: 5\n" + }, + { + "task_id": "task_grn_inference", + "category": "Raw results", + "name": "Metric 'r1_all' %missing", + "value": 0.36923076923076925, + "severity": 3, + "severity_value": 3.6923076923076925, + "code": "pct_missing <= .1", + "message": "Percentage of missing results should be less than 10%.\n Task id: task_grn_inference\n Metric id: r1_all\n Percentage missing: 37%\n" + }, + { + "task_id": "task_grn_inference", + "category": "Raw results", + "name": "Metric 'r1_grn' %missing", + "value": 0.36923076923076925, + "severity": 3, + "severity_value": 3.6923076923076925, + "code": "pct_missing <= .1", + "message": "Percentage of missing results should be less than 10%.\n Task id: task_grn_inference\n Metric id: r1_grn\n Percentage missing: 37%\n" + }, + { + "task_id": "task_grn_inference", + "category": "Raw results", + "name": "Metric 'r2-theta-0.0' %missing", + "value": 0.36923076923076925, + "severity": 3, + "severity_value": 3.6923076923076925, + "code": "pct_missing <= .1", + "message": "Percentage of missing results should be less than 10%.\n Task id: task_grn_inference\n Metric id: r2-theta-0.0\n Percentage missing: 37%\n" + }, + { + "task_id": "task_grn_inference", + "category": "Raw results", + "name": "Metric 'r2-theta-0.5' %missing", + "value": 0.36923076923076925, + "severity": 3, + "severity_value": 3.6923076923076925, + "code": "pct_missing <= .1", + "message": "Percentage of missing results should be less than 10%.\n Task id: task_grn_inference\n Metric id: r2-theta-0.5\n Percentage missing: 37%\n" + }, + { + "task_id": "task_grn_inference", + "category": "Raw results", + "name": "Metric 'r2-theta-1.0' %missing", + "value": 0.36923076923076925, + "severity": 3, + "severity_value": 3.6923076923076925, + "code": "pct_missing <= .1", + "message": "Percentage of missing results should be less than 10%.\n Task id: task_grn_inference\n Metric id: r2-theta-1.0\n Percentage missing: 37%\n" + }, + { + "task_id": "task_grn_inference", + "category": "Raw results", + "name": "Metric 'ws-theta-0.0' %missing", + "value": 0.6615384615384615, + "severity": 3, + "severity_value": 6.615384615384615, + "code": "pct_missing <= .1", + "message": "Percentage of missing results should be less than 10%.\n Task id: task_grn_inference\n Metric id: ws-theta-0.0\n Percentage missing: 66%\n" + }, + { + "task_id": "task_grn_inference", + "category": "Raw results", + "name": "Metric 'ws-theta-0.5' %missing", + "value": 0.6615384615384615, + "severity": 3, + "severity_value": 6.615384615384615, + "code": "pct_missing 
<= .1", + "message": "Percentage of missing results should be less than 10%.\n Task id: task_grn_inference\n Metric id: ws-theta-0.5\n Percentage missing: 66%\n" + }, + { + "task_id": "task_grn_inference", + "category": "Raw results", + "name": "Metric 'ws-theta-1.0' %missing", + "value": 0.6615384615384615, + "severity": 3, + "severity_value": 6.615384615384615, + "code": "pct_missing <= .1", + "message": "Percentage of missing results should be less than 10%.\n Task id: task_grn_inference\n Metric id: ws-theta-1.0\n Percentage missing: 66%\n" + }, + { + "task_id": "task_grn_inference", + "category": "Raw results", + "name": "Method 'pearson_corr' %missing", + "value": 0.15000000000000002, + "severity": 1, + "severity_value": 1.5000000000000002, + "code": "pct_missing <= .1", + "message": "Percentage of missing results should be less than 10%.\n Task id: task_grn_inference\n method id: pearson_corr\n Percentage missing: 15%\n" + }, + { + "task_id": "task_grn_inference", + "category": "Raw results", + "name": "Method 'negative_control' %missing", + "value": 0.15000000000000002, + "severity": 1, + "severity_value": 1.5000000000000002, + "code": "pct_missing <= .1", + "message": "Percentage of missing results should be less than 10%.\n Task id: task_grn_inference\n method id: negative_control\n Percentage missing: 15%\n" + }, + { + "task_id": "task_grn_inference", + "category": "Raw results", + "name": "Method 'positive_control' %missing", + "value": 0.15000000000000002, + "severity": 1, + "severity_value": 1.5000000000000002, + "code": "pct_missing <= .1", + "message": "Percentage of missing results should be less than 10%.\n Task id: task_grn_inference\n method id: positive_control\n Percentage missing: 15%\n" + }, + { + "task_id": "task_grn_inference", + "category": "Raw results", + "name": "Method 'portia' %missing", + "value": 0.15000000000000002, + "severity": 1, + "severity_value": 1.5000000000000002, + "code": "pct_missing <= .1", + "message": "Percentage of missing results should be less than 10%.\n Task id: task_grn_inference\n method id: portia\n Percentage missing: 15%\n" + }, + { + "task_id": "task_grn_inference", + "category": "Raw results", + "name": "Method 'ppcor' %missing", + "value": 0.275, + "severity": 2, + "severity_value": 2.75, + "code": "pct_missing <= .1", + "message": "Percentage of missing results should be less than 10%.\n Task id: task_grn_inference\n method id: ppcor\n Percentage missing: 28%\n" + }, + { + "task_id": "task_grn_inference", + "category": "Raw results", + "name": "Method 'scenic' %missing", + "value": 0.15000000000000002, + "severity": 1, + "severity_value": 1.5000000000000002, + "code": "pct_missing <= .1", + "message": "Percentage of missing results should be less than 10%.\n Task id: task_grn_inference\n method id: scenic\n Percentage missing: 15%\n" + }, + { + "task_id": "task_grn_inference", + "category": "Raw results", + "name": "Method 'scenicplus' %missing", + "value": 0.875, + "severity": 3, + "severity_value": 8.75, + "code": "pct_missing <= .1", + "message": "Percentage of missing results should be less than 10%.\n Task id: task_grn_inference\n method id: scenicplus\n Percentage missing: 88%\n" + }, + { + "task_id": "task_grn_inference", + "category": "Raw results", + "name": "Method 'scprint' %missing", + "value": 0.675, + "severity": 3, + "severity_value": 6.75, + "code": "pct_missing <= .1", + "message": "Percentage of missing results should be less than 10%.\n Task id: task_grn_inference\n method id: scprint\n Percentage missing: 
68%\n" + }, + { + "task_id": "task_grn_inference", + "category": "Raw results", + "name": "Method 'grnboost' %missing", + "value": 0.15000000000000002, + "severity": 1, + "severity_value": 1.5000000000000002, + "code": "pct_missing <= .1", + "message": "Percentage of missing results should be less than 10%.\n Task id: task_grn_inference\n method id: grnboost\n Percentage missing: 15%\n" + }, + { + "task_id": "task_grn_inference", + "category": "Raw results", + "name": "Method 'scglue' %missing", + "value": 0.875, + "severity": 3, + "severity_value": 8.75, + "code": "pct_missing <= .1", + "message": "Percentage of missing results should be less than 10%.\n Task id: task_grn_inference\n method id: scglue\n Percentage missing: 88%\n" + }, + { + "task_id": "task_grn_inference", + "category": "Raw results", + "name": "Method 'granie' %missing", + "value": 0.875, + "severity": 3, + "severity_value": 8.75, + "code": "pct_missing <= .1", + "message": "Percentage of missing results should be less than 10%.\n Task id: task_grn_inference\n method id: granie\n Percentage missing: 88%\n" + }, + { + "task_id": "task_grn_inference", + "category": "Raw results", + "name": "Method 'figr' %missing", + "value": 0.875, + "severity": 3, + "severity_value": 8.75, + "code": "pct_missing <= .1", + "message": "Percentage of missing results should be less than 10%.\n Task id: task_grn_inference\n method id: figr\n Percentage missing: 88%\n" + }, + { + "task_id": "task_grn_inference", + "category": "Raw results", + "name": "Method 'celloracle' %missing", + "value": 0.875, + "severity": 3, + "severity_value": 8.75, + "code": "pct_missing <= .1", + "message": "Percentage of missing results should be less than 10%.\n Task id: task_grn_inference\n method id: celloracle\n Percentage missing: 88%\n" + }, + { + "task_id": "task_grn_inference", + "category": "Raw results", + "name": "Dataset 'op' %missing", + "value": 0.375, + "severity": 3, + "severity_value": 3.75, + "code": "pct_missing <= .1", + "message": "Percentage of missing results should be less than 10%.\n Task id: task_grn_inference\n dataset id: op\n Percentage missing: 38%\n" + }, + { + "task_id": "task_grn_inference", + "category": "Raw results", + "name": "Dataset 'nakatake' %missing", + "value": 0.7115384615384616, + "severity": 3, + "severity_value": 7.115384615384615, + "code": "pct_missing <= .1", + "message": "Percentage of missing results should be less than 10%.\n Task id: task_grn_inference\n dataset id: nakatake\n Percentage missing: 71%\n" + }, + { + "task_id": "task_grn_inference", + "category": "Raw results", + "name": "Dataset 'norman' %missing", + "value": 0.3846153846153846, + "severity": 3, + "severity_value": 3.846153846153846, + "code": "pct_missing <= .1", + "message": "Percentage of missing results should be less than 10%.\n Task id: task_grn_inference\n dataset id: norman\n Percentage missing: 38%\n" + }, + { + "task_id": "task_grn_inference", + "category": "Raw results", + "name": "Dataset 'replogle' %missing", + "value": 0.46153846153846156, + "severity": 3, + "severity_value": 4.615384615384615, + "code": "pct_missing <= .1", + "message": "Percentage of missing results should be less than 10%.\n Task id: task_grn_inference\n dataset id: replogle\n Percentage missing: 46%\n" + }, + { + "task_id": "task_grn_inference", + "category": "Raw results", + "name": "Dataset 'adamson' %missing", + "value": 0.46153846153846156, + "severity": 3, + "severity_value": 4.615384615384615, + "code": "pct_missing <= .1", + "message": "Percentage of 
missing results should be less than 10%.\n Task id: task_grn_inference\n dataset id: adamson\n Percentage missing: 46%\n" + }, + { + "task_id": "task_grn_inference", + "category": "Scaling", + "name": "Worst score pearson_corr r1_all", + "value": 0.0, + "severity": 0, + "severity_value": -0.0, + "code": "worst_score >= -1", + "message": "Method pearson_corr performs much worse than baselines.\n Task id: task_grn_inference\n Method id: pearson_corr\n Metric id: r1_all\n Worst score: 0.0%\n" + }, + { + "task_id": "task_grn_inference", + "category": "Scaling", + "name": "Best score pearson_corr r1_all", + "value": 1.0, + "severity": 0, + "severity_value": 0.5, + "code": "best_score <= 2", + "message": "Method pearson_corr performs a lot better than baselines.\n Task id: task_grn_inference\n Method id: pearson_corr\n Metric id: r1_all\n Best score: 1.0%\n" + }, + { + "task_id": "task_grn_inference", + "category": "Scaling", + "name": "Worst score negative_control r1_all", + "value": 0.0, + "severity": 0, + "severity_value": -0.0, + "code": "worst_score >= -1", + "message": "Method negative_control performs much worse than baselines.\n Task id: task_grn_inference\n Method id: negative_control\n Metric id: r1_all\n Worst score: 0.0%\n" + }, + { + "task_id": "task_grn_inference", + "category": "Scaling", + "name": "Best score negative_control r1_all", + "value": 1.0, + "severity": 0, + "severity_value": 0.5, + "code": "best_score <= 2", + "message": "Method negative_control performs a lot better than baselines.\n Task id: task_grn_inference\n Method id: negative_control\n Metric id: r1_all\n Best score: 1.0%\n" + }, + { + "task_id": "task_grn_inference", + "category": "Scaling", + "name": "Worst score positive_control r1_all", + "value": 0.0, + "severity": 0, + "severity_value": -0.0, + "code": "worst_score >= -1", + "message": "Method positive_control performs much worse than baselines.\n Task id: task_grn_inference\n Method id: positive_control\n Metric id: r1_all\n Worst score: 0.0%\n" + }, + { + "task_id": "task_grn_inference", + "category": "Scaling", + "name": "Best score positive_control r1_all", + "value": 1.0, + "severity": 0, + "severity_value": 0.5, + "code": "best_score <= 2", + "message": "Method positive_control performs a lot better than baselines.\n Task id: task_grn_inference\n Method id: positive_control\n Metric id: r1_all\n Best score: 1.0%\n" + }, + { + "task_id": "task_grn_inference", + "category": "Scaling", + "name": "Worst score portia r1_all", + "value": -0.2998, + "severity": 0, + "severity_value": 0.2998, + "code": "worst_score >= -1", + "message": "Method portia performs much worse than baselines.\n Task id: task_grn_inference\n Method id: portia\n Metric id: r1_all\n Worst score: -0.2998%\n" + }, + { + "task_id": "task_grn_inference", + "category": "Scaling", + "name": "Best score portia r1_all", + "value": 4.5733, + "severity": 2, + "severity_value": 2.28665, + "code": "best_score <= 2", + "message": "Method portia performs a lot better than baselines.\n Task id: task_grn_inference\n Method id: portia\n Metric id: r1_all\n Best score: 4.5733%\n" + }, + { + "task_id": "task_grn_inference", + "category": "Scaling", + "name": "Worst score ppcor r1_all", + "value": 0.0, + "severity": 0, + "severity_value": -0.0, + "code": "worst_score >= -1", + "message": "Method ppcor performs much worse than baselines.\n Task id: task_grn_inference\n Method id: ppcor\n Metric id: r1_all\n Worst score: 0.0%\n" + }, + { + "task_id": "task_grn_inference", + "category": "Scaling", + 
"name": "Best score ppcor r1_all", + "value": 5.5476, + "severity": 2, + "severity_value": 2.7738, + "code": "best_score <= 2", + "message": "Method ppcor performs a lot better than baselines.\n Task id: task_grn_inference\n Method id: ppcor\n Metric id: r1_all\n Best score: 5.5476%\n" + }, + { + "task_id": "task_grn_inference", + "category": "Scaling", + "name": "Worst score scenic r1_all", + "value": -0.5592, + "severity": 0, + "severity_value": 0.5592, + "code": "worst_score >= -1", + "message": "Method scenic performs much worse than baselines.\n Task id: task_grn_inference\n Method id: scenic\n Metric id: r1_all\n Worst score: -0.5592%\n" + }, + { + "task_id": "task_grn_inference", + "category": "Scaling", + "name": "Best score scenic r1_all", + "value": 1.4205, + "severity": 0, + "severity_value": 0.71025, + "code": "best_score <= 2", + "message": "Method scenic performs a lot better than baselines.\n Task id: task_grn_inference\n Method id: scenic\n Metric id: r1_all\n Best score: 1.4205%\n" + }, + { + "task_id": "task_grn_inference", + "category": "Scaling", + "name": "Worst score scenicplus r1_all", + "value": 0.0, + "severity": 0, + "severity_value": -0.0, + "code": "worst_score >= -1", + "message": "Method scenicplus performs much worse than baselines.\n Task id: task_grn_inference\n Method id: scenicplus\n Metric id: r1_all\n Worst score: 0.0%\n" + }, + { + "task_id": "task_grn_inference", + "category": "Scaling", + "name": "Best score scenicplus r1_all", + "value": 0.7924, + "severity": 0, + "severity_value": 0.3962, + "code": "best_score <= 2", + "message": "Method scenicplus performs a lot better than baselines.\n Task id: task_grn_inference\n Method id: scenicplus\n Metric id: r1_all\n Best score: 0.7924%\n" + }, + { + "task_id": "task_grn_inference", + "category": "Scaling", + "name": "Worst score scprint r1_all", + "value": 0.0, + "severity": 0, + "severity_value": -0.0, + "code": "worst_score >= -1", + "message": "Method scprint performs much worse than baselines.\n Task id: task_grn_inference\n Method id: scprint\n Metric id: r1_all\n Worst score: 0.0%\n" + }, + { + "task_id": "task_grn_inference", + "category": "Scaling", + "name": "Best score scprint r1_all", + "value": 0.8641, + "severity": 0, + "severity_value": 0.43205, + "code": "best_score <= 2", + "message": "Method scprint performs a lot better than baselines.\n Task id: task_grn_inference\n Method id: scprint\n Metric id: r1_all\n Best score: 0.8641%\n" + }, + { + "task_id": "task_grn_inference", + "category": "Scaling", + "name": "Worst score grnboost r1_all", + "value": -1.3461, + "severity": 1, + "severity_value": 1.3461, + "code": "worst_score >= -1", + "message": "Method grnboost performs much worse than baselines.\n Task id: task_grn_inference\n Method id: grnboost\n Metric id: r1_all\n Worst score: -1.3461%\n" + }, + { + "task_id": "task_grn_inference", + "category": "Scaling", + "name": "Best score grnboost r1_all", + "value": 1.7614, + "severity": 0, + "severity_value": 0.8807, + "code": "best_score <= 2", + "message": "Method grnboost performs a lot better than baselines.\n Task id: task_grn_inference\n Method id: grnboost\n Metric id: r1_all\n Best score: 1.7614%\n" + }, + { + "task_id": "task_grn_inference", + "category": "Scaling", + "name": "Worst score scglue r1_all", + "value": 0.0, + "severity": 0, + "severity_value": -0.0, + "code": "worst_score >= -1", + "message": "Method scglue performs much worse than baselines.\n Task id: task_grn_inference\n Method id: scglue\n Metric id: r1_all\n Worst 
score: 0.0%\n" + }, + { + "task_id": "task_grn_inference", + "category": "Scaling", + "name": "Best score scglue r1_all", + "value": 0.1874, + "severity": 0, + "severity_value": 0.0937, + "code": "best_score <= 2", + "message": "Method scglue performs a lot better than baselines.\n Task id: task_grn_inference\n Method id: scglue\n Metric id: r1_all\n Best score: 0.1874%\n" + }, + { + "task_id": "task_grn_inference", + "category": "Scaling", + "name": "Worst score granie r1_all", + "value": 0.0, + "severity": 0, + "severity_value": -0.0, + "code": "worst_score >= -1", + "message": "Method granie performs much worse than baselines.\n Task id: task_grn_inference\n Method id: granie\n Metric id: r1_all\n Worst score: 0.0%\n" + }, + { + "task_id": "task_grn_inference", + "category": "Scaling", + "name": "Best score granie r1_all", + "value": 0.1488, + "severity": 0, + "severity_value": 0.0744, + "code": "best_score <= 2", + "message": "Method granie performs a lot better than baselines.\n Task id: task_grn_inference\n Method id: granie\n Metric id: r1_all\n Best score: 0.1488%\n" + }, + { + "task_id": "task_grn_inference", + "category": "Scaling", + "name": "Worst score figr r1_all", + "value": 0.0, + "severity": 0, + "severity_value": -0.0, + "code": "worst_score >= -1", + "message": "Method figr performs much worse than baselines.\n Task id: task_grn_inference\n Method id: figr\n Metric id: r1_all\n Worst score: 0.0%\n" + }, + { + "task_id": "task_grn_inference", + "category": "Scaling", + "name": "Best score figr r1_all", + "value": 0.08, + "severity": 0, + "severity_value": 0.04, + "code": "best_score <= 2", + "message": "Method figr performs a lot better than baselines.\n Task id: task_grn_inference\n Method id: figr\n Metric id: r1_all\n Best score: 0.08%\n" + }, + { + "task_id": "task_grn_inference", + "category": "Scaling", + "name": "Worst score celloracle r1_all", + "value": 0.0, + "severity": 0, + "severity_value": -0.0, + "code": "worst_score >= -1", + "message": "Method celloracle performs much worse than baselines.\n Task id: task_grn_inference\n Method id: celloracle\n Metric id: r1_all\n Worst score: 0.0%\n" + }, + { + "task_id": "task_grn_inference", + "category": "Scaling", + "name": "Best score celloracle r1_all", + "value": 0.7409, + "severity": 0, + "severity_value": 0.37045, + "code": "best_score <= 2", + "message": "Method celloracle performs a lot better than baselines.\n Task id: task_grn_inference\n Method id: celloracle\n Metric id: r1_all\n Best score: 0.7409%\n" + }, + { + "task_id": "task_grn_inference", + "category": "Scaling", + "name": "Worst score pearson_corr r1_grn", + "value": 0.0, + "severity": 0, + "severity_value": -0.0, + "code": "worst_score >= -1", + "message": "Method pearson_corr performs much worse than baselines.\n Task id: task_grn_inference\n Method id: pearson_corr\n Metric id: r1_grn\n Worst score: 0.0%\n" + }, + { + "task_id": "task_grn_inference", + "category": "Scaling", + "name": "Best score pearson_corr r1_grn", + "value": 1.0, + "severity": 0, + "severity_value": 0.5, + "code": "best_score <= 2", + "message": "Method pearson_corr performs a lot better than baselines.\n Task id: task_grn_inference\n Method id: pearson_corr\n Metric id: r1_grn\n Best score: 1.0%\n" + }, + { + "task_id": "task_grn_inference", + "category": "Scaling", + "name": "Worst score negative_control r1_grn", + "value": 0.0, + "severity": 0, + "severity_value": -0.0, + "code": "worst_score >= -1", + "message": "Method negative_control performs much worse than 
baselines.\n Task id: task_grn_inference\n Method id: negative_control\n Metric id: r1_grn\n Worst score: 0.0%\n" + }, + { + "task_id": "task_grn_inference", + "category": "Scaling", + "name": "Best score negative_control r1_grn", + "value": 1.0, + "severity": 0, + "severity_value": 0.5, + "code": "best_score <= 2", + "message": "Method negative_control performs a lot better than baselines.\n Task id: task_grn_inference\n Method id: negative_control\n Metric id: r1_grn\n Best score: 1.0%\n" + }, + { + "task_id": "task_grn_inference", + "category": "Scaling", + "name": "Worst score positive_control r1_grn", + "value": 0.0, + "severity": 0, + "severity_value": -0.0, + "code": "worst_score >= -1", + "message": "Method positive_control performs much worse than baselines.\n Task id: task_grn_inference\n Method id: positive_control\n Metric id: r1_grn\n Worst score: 0.0%\n" + }, + { + "task_id": "task_grn_inference", + "category": "Scaling", + "name": "Best score positive_control r1_grn", + "value": 1.0, + "severity": 0, + "severity_value": 0.5, + "code": "best_score <= 2", + "message": "Method positive_control performs a lot better than baselines.\n Task id: task_grn_inference\n Method id: positive_control\n Metric id: r1_grn\n Best score: 1.0%\n" + }, + { + "task_id": "task_grn_inference", + "category": "Scaling", + "name": "Worst score portia r1_grn", + "value": -0.33, + "severity": 0, + "severity_value": 0.33, + "code": "worst_score >= -1", + "message": "Method portia performs much worse than baselines.\n Task id: task_grn_inference\n Method id: portia\n Metric id: r1_grn\n Worst score: -0.33%\n" + }, + { + "task_id": "task_grn_inference", + "category": "Scaling", + "name": "Best score portia r1_grn", + "value": 1.8316, + "severity": 0, + "severity_value": 0.9158, + "code": "best_score <= 2", + "message": "Method portia performs a lot better than baselines.\n Task id: task_grn_inference\n Method id: portia\n Metric id: r1_grn\n Best score: 1.8316%\n" + }, + { + "task_id": "task_grn_inference", + "category": "Scaling", + "name": "Worst score ppcor r1_grn", + "value": 0.0, + "severity": 0, + "severity_value": -0.0, + "code": "worst_score >= -1", + "message": "Method ppcor performs much worse than baselines.\n Task id: task_grn_inference\n Method id: ppcor\n Metric id: r1_grn\n Worst score: 0.0%\n" + }, + { + "task_id": "task_grn_inference", + "category": "Scaling", + "name": "Best score ppcor r1_grn", + "value": 2.2129, + "severity": 1, + "severity_value": 1.10645, + "code": "best_score <= 2", + "message": "Method ppcor performs a lot better than baselines.\n Task id: task_grn_inference\n Method id: ppcor\n Metric id: r1_grn\n Best score: 2.2129%\n" + }, + { + "task_id": "task_grn_inference", + "category": "Scaling", + "name": "Worst score scenic r1_grn", + "value": -0.4906, + "severity": 0, + "severity_value": 0.4906, + "code": "worst_score >= -1", + "message": "Method scenic performs much worse than baselines.\n Task id: task_grn_inference\n Method id: scenic\n Metric id: r1_grn\n Worst score: -0.4906%\n" + }, + { + "task_id": "task_grn_inference", + "category": "Scaling", + "name": "Best score scenic r1_grn", + "value": 1.5896, + "severity": 0, + "severity_value": 0.7948, + "code": "best_score <= 2", + "message": "Method scenic performs a lot better than baselines.\n Task id: task_grn_inference\n Method id: scenic\n Metric id: r1_grn\n Best score: 1.5896%\n" + }, + { + "task_id": "task_grn_inference", + "category": "Scaling", + "name": "Worst score scenicplus r1_grn", + "value": 0.0, + 
"severity": 0, + "severity_value": -0.0, + "code": "worst_score >= -1", + "message": "Method scenicplus performs much worse than baselines.\n Task id: task_grn_inference\n Method id: scenicplus\n Metric id: r1_grn\n Worst score: 0.0%\n" + }, + { + "task_id": "task_grn_inference", + "category": "Scaling", + "name": "Best score scenicplus r1_grn", + "value": 0.6022, + "severity": 0, + "severity_value": 0.3011, + "code": "best_score <= 2", + "message": "Method scenicplus performs a lot better than baselines.\n Task id: task_grn_inference\n Method id: scenicplus\n Metric id: r1_grn\n Best score: 0.6022%\n" + }, + { + "task_id": "task_grn_inference", + "category": "Scaling", + "name": "Worst score scprint r1_grn", + "value": 0.0, + "severity": 0, + "severity_value": -0.0, + "code": "worst_score >= -1", + "message": "Method scprint performs much worse than baselines.\n Task id: task_grn_inference\n Method id: scprint\n Metric id: r1_grn\n Worst score: 0.0%\n" + }, + { + "task_id": "task_grn_inference", + "category": "Scaling", + "name": "Best score scprint r1_grn", + "value": 0.9565, + "severity": 0, + "severity_value": 0.47825, + "code": "best_score <= 2", + "message": "Method scprint performs a lot better than baselines.\n Task id: task_grn_inference\n Method id: scprint\n Metric id: r1_grn\n Best score: 0.9565%\n" + }, + { + "task_id": "task_grn_inference", + "category": "Scaling", + "name": "Worst score grnboost r1_grn", + "value": -0.5688, + "severity": 0, + "severity_value": 0.5688, + "code": "worst_score >= -1", + "message": "Method grnboost performs much worse than baselines.\n Task id: task_grn_inference\n Method id: grnboost\n Metric id: r1_grn\n Worst score: -0.5688%\n" + }, + { + "task_id": "task_grn_inference", + "category": "Scaling", + "name": "Best score grnboost r1_grn", + "value": 1.2368, + "severity": 0, + "severity_value": 0.6184, + "code": "best_score <= 2", + "message": "Method grnboost performs a lot better than baselines.\n Task id: task_grn_inference\n Method id: grnboost\n Metric id: r1_grn\n Best score: 1.2368%\n" + }, + { + "task_id": "task_grn_inference", + "category": "Scaling", + "name": "Worst score scglue r1_grn", + "value": 0.0, + "severity": 0, + "severity_value": -0.0, + "code": "worst_score >= -1", + "message": "Method scglue performs much worse than baselines.\n Task id: task_grn_inference\n Method id: scglue\n Metric id: r1_grn\n Worst score: 0.0%\n" + }, + { + "task_id": "task_grn_inference", + "category": "Scaling", + "name": "Best score scglue r1_grn", + "value": 0.4259, + "severity": 0, + "severity_value": 0.21295, + "code": "best_score <= 2", + "message": "Method scglue performs a lot better than baselines.\n Task id: task_grn_inference\n Method id: scglue\n Metric id: r1_grn\n Best score: 0.4259%\n" + }, + { + "task_id": "task_grn_inference", + "category": "Scaling", + "name": "Worst score granie r1_grn", + "value": 0.0, + "severity": 0, + "severity_value": -0.0, + "code": "worst_score >= -1", + "message": "Method granie performs much worse than baselines.\n Task id: task_grn_inference\n Method id: granie\n Metric id: r1_grn\n Worst score: 0.0%\n" + }, + { + "task_id": "task_grn_inference", + "category": "Scaling", + "name": "Best score granie r1_grn", + "value": 0.1035, + "severity": 0, + "severity_value": 0.05175, + "code": "best_score <= 2", + "message": "Method granie performs a lot better than baselines.\n Task id: task_grn_inference\n Method id: granie\n Metric id: r1_grn\n Best score: 0.1035%\n" + }, + { + "task_id": "task_grn_inference", + 
"category": "Scaling", + "name": "Worst score figr r1_grn", + "value": 0.0, + "severity": 0, + "severity_value": -0.0, + "code": "worst_score >= -1", + "message": "Method figr performs much worse than baselines.\n Task id: task_grn_inference\n Method id: figr\n Metric id: r1_grn\n Worst score: 0.0%\n" + }, + { + "task_id": "task_grn_inference", + "category": "Scaling", + "name": "Best score figr r1_grn", + "value": 0.4579, + "severity": 0, + "severity_value": 0.22895, + "code": "best_score <= 2", + "message": "Method figr performs a lot better than baselines.\n Task id: task_grn_inference\n Method id: figr\n Metric id: r1_grn\n Best score: 0.4579%\n" + }, + { + "task_id": "task_grn_inference", + "category": "Scaling", + "name": "Worst score celloracle r1_grn", + "value": 0.0, + "severity": 0, + "severity_value": -0.0, + "code": "worst_score >= -1", + "message": "Method celloracle performs much worse than baselines.\n Task id: task_grn_inference\n Method id: celloracle\n Metric id: r1_grn\n Worst score: 0.0%\n" + }, + { + "task_id": "task_grn_inference", + "category": "Scaling", + "name": "Best score celloracle r1_grn", + "value": 0.623, + "severity": 0, + "severity_value": 0.3115, + "code": "best_score <= 2", + "message": "Method celloracle performs a lot better than baselines.\n Task id: task_grn_inference\n Method id: celloracle\n Metric id: r1_grn\n Best score: 0.623%\n" + }, + { + "task_id": "task_grn_inference", + "category": "Scaling", + "name": "Worst score pearson_corr r2-theta-0.0", + "value": 0.0, + "severity": 0, + "severity_value": -0.0, + "code": "worst_score >= -1", + "message": "Method pearson_corr performs much worse than baselines.\n Task id: task_grn_inference\n Method id: pearson_corr\n Metric id: r2-theta-0.0\n Worst score: 0.0%\n" + }, + { + "task_id": "task_grn_inference", + "category": "Scaling", + "name": "Best score pearson_corr r2-theta-0.0", + "value": 1.0, + "severity": 0, + "severity_value": 0.5, + "code": "best_score <= 2", + "message": "Method pearson_corr performs a lot better than baselines.\n Task id: task_grn_inference\n Method id: pearson_corr\n Metric id: r2-theta-0.0\n Best score: 1.0%\n" + }, + { + "task_id": "task_grn_inference", + "category": "Scaling", + "name": "Worst score negative_control r2-theta-0.0", + "value": 0, + "severity": 0, + "severity_value": -0.0, + "code": "worst_score >= -1", + "message": "Method negative_control performs much worse than baselines.\n Task id: task_grn_inference\n Method id: negative_control\n Metric id: r2-theta-0.0\n Worst score: 0%\n" + }, + { + "task_id": "task_grn_inference", + "category": "Scaling", + "name": "Best score negative_control r2-theta-0.0", + "value": 0, + "severity": 0, + "severity_value": 0.0, + "code": "best_score <= 2", + "message": "Method negative_control performs a lot better than baselines.\n Task id: task_grn_inference\n Method id: negative_control\n Metric id: r2-theta-0.0\n Best score: 0%\n" + }, + { + "task_id": "task_grn_inference", + "category": "Scaling", + "name": "Worst score positive_control r2-theta-0.0", + "value": 0.0, + "severity": 0, + "severity_value": -0.0, + "code": "worst_score >= -1", + "message": "Method positive_control performs much worse than baselines.\n Task id: task_grn_inference\n Method id: positive_control\n Metric id: r2-theta-0.0\n Worst score: 0.0%\n" + }, + { + "task_id": "task_grn_inference", + "category": "Scaling", + "name": "Best score positive_control r2-theta-0.0", + "value": 1.0, + "severity": 0, + "severity_value": 0.5, + "code": "best_score <= 2", 
+ "message": "Method positive_control performs a lot better than baselines.\n Task id: task_grn_inference\n Method id: positive_control\n Metric id: r2-theta-0.0\n Best score: 1.0%\n" + }, + { + "task_id": "task_grn_inference", + "category": "Scaling", + "name": "Worst score portia r2-theta-0.0", + "value": -0.296, + "severity": 0, + "severity_value": 0.296, + "code": "worst_score >= -1", + "message": "Method portia performs much worse than baselines.\n Task id: task_grn_inference\n Method id: portia\n Metric id: r2-theta-0.0\n Worst score: -0.296%\n" + }, + { + "task_id": "task_grn_inference", + "category": "Scaling", + "name": "Best score portia r2-theta-0.0", + "value": 0.8997, + "severity": 0, + "severity_value": 0.44985, + "code": "best_score <= 2", + "message": "Method portia performs a lot better than baselines.\n Task id: task_grn_inference\n Method id: portia\n Metric id: r2-theta-0.0\n Best score: 0.8997%\n" + }, + { + "task_id": "task_grn_inference", + "category": "Scaling", + "name": "Worst score ppcor r2-theta-0.0", + "value": -0.0045, + "severity": 0, + "severity_value": 0.0045, + "code": "worst_score >= -1", + "message": "Method ppcor performs much worse than baselines.\n Task id: task_grn_inference\n Method id: ppcor\n Metric id: r2-theta-0.0\n Worst score: -0.0045%\n" + }, + { + "task_id": "task_grn_inference", + "category": "Scaling", + "name": "Best score ppcor r2-theta-0.0", + "value": 0.4102, + "severity": 0, + "severity_value": 0.2051, + "code": "best_score <= 2", + "message": "Method ppcor performs a lot better than baselines.\n Task id: task_grn_inference\n Method id: ppcor\n Metric id: r2-theta-0.0\n Best score: 0.4102%\n" + }, + { + "task_id": "task_grn_inference", + "category": "Scaling", + "name": "Worst score scenic r2-theta-0.0", + "value": 0.0, + "severity": 0, + "severity_value": -0.0, + "code": "worst_score >= -1", + "message": "Method scenic performs much worse than baselines.\n Task id: task_grn_inference\n Method id: scenic\n Metric id: r2-theta-0.0\n Worst score: 0.0%\n" + }, + { + "task_id": "task_grn_inference", + "category": "Scaling", + "name": "Best score scenic r2-theta-0.0", + "value": 0.7108, + "severity": 0, + "severity_value": 0.3554, + "code": "best_score <= 2", + "message": "Method scenic performs a lot better than baselines.\n Task id: task_grn_inference\n Method id: scenic\n Metric id: r2-theta-0.0\n Best score: 0.7108%\n" + }, + { + "task_id": "task_grn_inference", + "category": "Scaling", + "name": "Worst score scenicplus r2-theta-0.0", + "value": 0.0, + "severity": 0, + "severity_value": -0.0, + "code": "worst_score >= -1", + "message": "Method scenicplus performs much worse than baselines.\n Task id: task_grn_inference\n Method id: scenicplus\n Metric id: r2-theta-0.0\n Worst score: 0.0%\n" + }, + { + "task_id": "task_grn_inference", + "category": "Scaling", + "name": "Best score scenicplus r2-theta-0.0", + "value": 0.9516, + "severity": 0, + "severity_value": 0.4758, + "code": "best_score <= 2", + "message": "Method scenicplus performs a lot better than baselines.\n Task id: task_grn_inference\n Method id: scenicplus\n Metric id: r2-theta-0.0\n Best score: 0.9516%\n" + }, + { + "task_id": "task_grn_inference", + "category": "Scaling", + "name": "Worst score scprint r2-theta-0.0", + "value": 0.0, + "severity": 0, + "severity_value": -0.0, + "code": "worst_score >= -1", + "message": "Method scprint performs much worse than baselines.\n Task id: task_grn_inference\n Method id: scprint\n Metric id: r2-theta-0.0\n Worst score: 0.0%\n" + }, 
+ { + "task_id": "task_grn_inference", + "category": "Scaling", + "name": "Best score scprint r2-theta-0.0", + "value": 0.8811, + "severity": 0, + "severity_value": 0.44055, + "code": "best_score <= 2", + "message": "Method scprint performs a lot better than baselines.\n Task id: task_grn_inference\n Method id: scprint\n Metric id: r2-theta-0.0\n Best score: 0.8811%\n" + }, + { + "task_id": "task_grn_inference", + "category": "Scaling", + "name": "Worst score grnboost r2-theta-0.0", + "value": 0.0, + "severity": 0, + "severity_value": -0.0, + "code": "worst_score >= -1", + "message": "Method grnboost performs much worse than baselines.\n Task id: task_grn_inference\n Method id: grnboost\n Metric id: r2-theta-0.0\n Worst score: 0.0%\n" + }, + { + "task_id": "task_grn_inference", + "category": "Scaling", + "name": "Best score grnboost r2-theta-0.0", + "value": 1.3291, + "severity": 0, + "severity_value": 0.66455, + "code": "best_score <= 2", + "message": "Method grnboost performs a lot better than baselines.\n Task id: task_grn_inference\n Method id: grnboost\n Metric id: r2-theta-0.0\n Best score: 1.3291%\n" + }, + { + "task_id": "task_grn_inference", + "category": "Scaling", + "name": "Worst score scglue r2-theta-0.0", + "value": 0.0, + "severity": 0, + "severity_value": -0.0, + "code": "worst_score >= -1", + "message": "Method scglue performs much worse than baselines.\n Task id: task_grn_inference\n Method id: scglue\n Metric id: r2-theta-0.0\n Worst score: 0.0%\n" + }, + { + "task_id": "task_grn_inference", + "category": "Scaling", + "name": "Best score scglue r2-theta-0.0", + "value": 0.3365, + "severity": 0, + "severity_value": 0.16825, + "code": "best_score <= 2", + "message": "Method scglue performs a lot better than baselines.\n Task id: task_grn_inference\n Method id: scglue\n Metric id: r2-theta-0.0\n Best score: 0.3365%\n" + }, + { + "task_id": "task_grn_inference", + "category": "Scaling", + "name": "Worst score granie r2-theta-0.0", + "value": -0.1114, + "severity": 0, + "severity_value": 0.1114, + "code": "worst_score >= -1", + "message": "Method granie performs much worse than baselines.\n Task id: task_grn_inference\n Method id: granie\n Metric id: r2-theta-0.0\n Worst score: -0.1114%\n" + }, + { + "task_id": "task_grn_inference", + "category": "Scaling", + "name": "Best score granie r2-theta-0.0", + "value": 0.0, + "severity": 0, + "severity_value": 0.0, + "code": "best_score <= 2", + "message": "Method granie performs a lot better than baselines.\n Task id: task_grn_inference\n Method id: granie\n Metric id: r2-theta-0.0\n Best score: 0.0%\n" + }, + { + "task_id": "task_grn_inference", + "category": "Scaling", + "name": "Worst score figr r2-theta-0.0", + "value": 0.0, + "severity": 0, + "severity_value": -0.0, + "code": "worst_score >= -1", + "message": "Method figr performs much worse than baselines.\n Task id: task_grn_inference\n Method id: figr\n Metric id: r2-theta-0.0\n Worst score: 0.0%\n" + }, + { + "task_id": "task_grn_inference", + "category": "Scaling", + "name": "Best score figr r2-theta-0.0", + "value": 0.5974, + "severity": 0, + "severity_value": 0.2987, + "code": "best_score <= 2", + "message": "Method figr performs a lot better than baselines.\n Task id: task_grn_inference\n Method id: figr\n Metric id: r2-theta-0.0\n Best score: 0.5974%\n" + }, + { + "task_id": "task_grn_inference", + "category": "Scaling", + "name": "Worst score celloracle r2-theta-0.0", + "value": 0.0, + "severity": 0, + "severity_value": -0.0, + "code": "worst_score >= -1", + 
"message": "Method celloracle performs much worse than baselines.\n Task id: task_grn_inference\n Method id: celloracle\n Metric id: r2-theta-0.0\n Worst score: 0.0%\n" + }, + { + "task_id": "task_grn_inference", + "category": "Scaling", + "name": "Best score celloracle r2-theta-0.0", + "value": 0.9779, + "severity": 0, + "severity_value": 0.48895, + "code": "best_score <= 2", + "message": "Method celloracle performs a lot better than baselines.\n Task id: task_grn_inference\n Method id: celloracle\n Metric id: r2-theta-0.0\n Best score: 0.9779%\n" + }, + { + "task_id": "task_grn_inference", + "category": "Scaling", + "name": "Worst score pearson_corr r2-theta-0.5", + "value": 0.0, + "severity": 0, + "severity_value": -0.0, + "code": "worst_score >= -1", + "message": "Method pearson_corr performs much worse than baselines.\n Task id: task_grn_inference\n Method id: pearson_corr\n Metric id: r2-theta-0.5\n Worst score: 0.0%\n" + }, + { + "task_id": "task_grn_inference", + "category": "Scaling", + "name": "Best score pearson_corr r2-theta-0.5", + "value": 1.0, + "severity": 0, + "severity_value": 0.5, + "code": "best_score <= 2", + "message": "Method pearson_corr performs a lot better than baselines.\n Task id: task_grn_inference\n Method id: pearson_corr\n Metric id: r2-theta-0.5\n Best score: 1.0%\n" + }, + { + "task_id": "task_grn_inference", + "category": "Scaling", + "name": "Worst score negative_control r2-theta-0.5", + "value": 0, + "severity": 0, + "severity_value": -0.0, + "code": "worst_score >= -1", + "message": "Method negative_control performs much worse than baselines.\n Task id: task_grn_inference\n Method id: negative_control\n Metric id: r2-theta-0.5\n Worst score: 0%\n" + }, + { + "task_id": "task_grn_inference", + "category": "Scaling", + "name": "Best score negative_control r2-theta-0.5", + "value": 0, + "severity": 0, + "severity_value": 0.0, + "code": "best_score <= 2", + "message": "Method negative_control performs a lot better than baselines.\n Task id: task_grn_inference\n Method id: negative_control\n Metric id: r2-theta-0.5\n Best score: 0%\n" + }, + { + "task_id": "task_grn_inference", + "category": "Scaling", + "name": "Worst score positive_control r2-theta-0.5", + "value": 0.0, + "severity": 0, + "severity_value": -0.0, + "code": "worst_score >= -1", + "message": "Method positive_control performs much worse than baselines.\n Task id: task_grn_inference\n Method id: positive_control\n Metric id: r2-theta-0.5\n Worst score: 0.0%\n" + }, + { + "task_id": "task_grn_inference", + "category": "Scaling", + "name": "Best score positive_control r2-theta-0.5", + "value": 1.0, + "severity": 0, + "severity_value": 0.5, + "code": "best_score <= 2", + "message": "Method positive_control performs a lot better than baselines.\n Task id: task_grn_inference\n Method id: positive_control\n Metric id: r2-theta-0.5\n Best score: 1.0%\n" + }, + { + "task_id": "task_grn_inference", + "category": "Scaling", + "name": "Worst score portia r2-theta-0.5", + "value": -0.8293, + "severity": 0, + "severity_value": 0.8293, + "code": "worst_score >= -1", + "message": "Method portia performs much worse than baselines.\n Task id: task_grn_inference\n Method id: portia\n Metric id: r2-theta-0.5\n Worst score: -0.8293%\n" + }, + { + "task_id": "task_grn_inference", + "category": "Scaling", + "name": "Best score portia r2-theta-0.5", + "value": 0.8938, + "severity": 0, + "severity_value": 0.4469, + "code": "best_score <= 2", + "message": "Method portia performs a lot better than baselines.\n Task 
id: task_grn_inference\n Method id: portia\n Metric id: r2-theta-0.5\n Best score: 0.8938%\n" + }, + { + "task_id": "task_grn_inference", + "category": "Scaling", + "name": "Worst score ppcor r2-theta-0.5", + "value": 0.0, + "severity": 0, + "severity_value": -0.0, + "code": "worst_score >= -1", + "message": "Method ppcor performs much worse than baselines.\n Task id: task_grn_inference\n Method id: ppcor\n Metric id: r2-theta-0.5\n Worst score: 0.0%\n" + }, + { + "task_id": "task_grn_inference", + "category": "Scaling", + "name": "Best score ppcor r2-theta-0.5", + "value": 0.52, + "severity": 0, + "severity_value": 0.26, + "code": "best_score <= 2", + "message": "Method ppcor performs a lot better than baselines.\n Task id: task_grn_inference\n Method id: ppcor\n Metric id: r2-theta-0.5\n Best score: 0.52%\n" + }, + { + "task_id": "task_grn_inference", + "category": "Scaling", + "name": "Worst score scenic r2-theta-0.5", + "value": 0.0, + "severity": 0, + "severity_value": -0.0, + "code": "worst_score >= -1", + "message": "Method scenic performs much worse than baselines.\n Task id: task_grn_inference\n Method id: scenic\n Metric id: r2-theta-0.5\n Worst score: 0.0%\n" + }, + { + "task_id": "task_grn_inference", + "category": "Scaling", + "name": "Best score scenic r2-theta-0.5", + "value": 0.6294, + "severity": 0, + "severity_value": 0.3147, + "code": "best_score <= 2", + "message": "Method scenic performs a lot better than baselines.\n Task id: task_grn_inference\n Method id: scenic\n Metric id: r2-theta-0.5\n Best score: 0.6294%\n" + }, + { + "task_id": "task_grn_inference", + "category": "Scaling", + "name": "Worst score scenicplus r2-theta-0.5", + "value": 0.0, + "severity": 0, + "severity_value": -0.0, + "code": "worst_score >= -1", + "message": "Method scenicplus performs much worse than baselines.\n Task id: task_grn_inference\n Method id: scenicplus\n Metric id: r2-theta-0.5\n Worst score: 0.0%\n" + }, + { + "task_id": "task_grn_inference", + "category": "Scaling", + "name": "Best score scenicplus r2-theta-0.5", + "value": 1.0967, + "severity": 0, + "severity_value": 0.54835, + "code": "best_score <= 2", + "message": "Method scenicplus performs a lot better than baselines.\n Task id: task_grn_inference\n Method id: scenicplus\n Metric id: r2-theta-0.5\n Best score: 1.0967%\n" + }, + { + "task_id": "task_grn_inference", + "category": "Scaling", + "name": "Worst score scprint r2-theta-0.5", + "value": 0.0, + "severity": 0, + "severity_value": -0.0, + "code": "worst_score >= -1", + "message": "Method scprint performs much worse than baselines.\n Task id: task_grn_inference\n Method id: scprint\n Metric id: r2-theta-0.5\n Worst score: 0.0%\n" + }, + { + "task_id": "task_grn_inference", + "category": "Scaling", + "name": "Best score scprint r2-theta-0.5", + "value": 0.7338, + "severity": 0, + "severity_value": 0.3669, + "code": "best_score <= 2", + "message": "Method scprint performs a lot better than baselines.\n Task id: task_grn_inference\n Method id: scprint\n Metric id: r2-theta-0.5\n Best score: 0.7338%\n" + }, + { + "task_id": "task_grn_inference", + "category": "Scaling", + "name": "Worst score grnboost r2-theta-0.5", + "value": 0.0, + "severity": 0, + "severity_value": -0.0, + "code": "worst_score >= -1", + "message": "Method grnboost performs much worse than baselines.\n Task id: task_grn_inference\n Method id: grnboost\n Metric id: r2-theta-0.5\n Worst score: 0.0%\n" + }, + { + "task_id": "task_grn_inference", + "category": "Scaling", + "name": "Best score grnboost 
r2-theta-0.5", + "value": 1.5093, + "severity": 0, + "severity_value": 0.75465, + "code": "best_score <= 2", + "message": "Method grnboost performs a lot better than baselines.\n Task id: task_grn_inference\n Method id: grnboost\n Metric id: r2-theta-0.5\n Best score: 1.5093%\n" + }, + { + "task_id": "task_grn_inference", + "category": "Scaling", + "name": "Worst score scglue r2-theta-0.5", + "value": 0.0, + "severity": 0, + "severity_value": -0.0, + "code": "worst_score >= -1", + "message": "Method scglue performs much worse than baselines.\n Task id: task_grn_inference\n Method id: scglue\n Metric id: r2-theta-0.5\n Worst score: 0.0%\n" + }, + { + "task_id": "task_grn_inference", + "category": "Scaling", + "name": "Best score scglue r2-theta-0.5", + "value": 0.1601, + "severity": 0, + "severity_value": 0.08005, + "code": "best_score <= 2", + "message": "Method scglue performs a lot better than baselines.\n Task id: task_grn_inference\n Method id: scglue\n Metric id: r2-theta-0.5\n Best score: 0.1601%\n" + }, + { + "task_id": "task_grn_inference", + "category": "Scaling", + "name": "Worst score granie r2-theta-0.5", + "value": -0.2512, + "severity": 0, + "severity_value": 0.2512, + "code": "worst_score >= -1", + "message": "Method granie performs much worse than baselines.\n Task id: task_grn_inference\n Method id: granie\n Metric id: r2-theta-0.5\n Worst score: -0.2512%\n" + }, + { + "task_id": "task_grn_inference", + "category": "Scaling", + "name": "Best score granie r2-theta-0.5", + "value": 0.0, + "severity": 0, + "severity_value": 0.0, + "code": "best_score <= 2", + "message": "Method granie performs a lot better than baselines.\n Task id: task_grn_inference\n Method id: granie\n Metric id: r2-theta-0.5\n Best score: 0.0%\n" + }, + { + "task_id": "task_grn_inference", + "category": "Scaling", + "name": "Worst score figr r2-theta-0.5", + "value": 0.0, + "severity": 0, + "severity_value": -0.0, + "code": "worst_score >= -1", + "message": "Method figr performs much worse than baselines.\n Task id: task_grn_inference\n Method id: figr\n Metric id: r2-theta-0.5\n Worst score: 0.0%\n" + }, + { + "task_id": "task_grn_inference", + "category": "Scaling", + "name": "Best score figr r2-theta-0.5", + "value": 0.6611, + "severity": 0, + "severity_value": 0.33055, + "code": "best_score <= 2", + "message": "Method figr performs a lot better than baselines.\n Task id: task_grn_inference\n Method id: figr\n Metric id: r2-theta-0.5\n Best score: 0.6611%\n" + }, + { + "task_id": "task_grn_inference", + "category": "Scaling", + "name": "Worst score celloracle r2-theta-0.5", + "value": 0.0, + "severity": 0, + "severity_value": -0.0, + "code": "worst_score >= -1", + "message": "Method celloracle performs much worse than baselines.\n Task id: task_grn_inference\n Method id: celloracle\n Metric id: r2-theta-0.5\n Worst score: 0.0%\n" + }, + { + "task_id": "task_grn_inference", + "category": "Scaling", + "name": "Best score celloracle r2-theta-0.5", + "value": 0.9749, + "severity": 0, + "severity_value": 0.48745, + "code": "best_score <= 2", + "message": "Method celloracle performs a lot better than baselines.\n Task id: task_grn_inference\n Method id: celloracle\n Metric id: r2-theta-0.5\n Best score: 0.9749%\n" + }, + { + "task_id": "task_grn_inference", + "category": "Scaling", + "name": "Worst score pearson_corr r2-theta-1.0", + "value": 0.0, + "severity": 0, + "severity_value": -0.0, + "code": "worst_score >= -1", + "message": "Method pearson_corr performs much worse than baselines.\n Task id: 
task_grn_inference\n Method id: pearson_corr\n Metric id: r2-theta-1.0\n Worst score: 0.0%\n" + }, + { + "task_id": "task_grn_inference", + "category": "Scaling", + "name": "Best score pearson_corr r2-theta-1.0", + "value": 1.0, + "severity": 0, + "severity_value": 0.5, + "code": "best_score <= 2", + "message": "Method pearson_corr performs a lot better than baselines.\n Task id: task_grn_inference\n Method id: pearson_corr\n Metric id: r2-theta-1.0\n Best score: 1.0%\n" + }, + { + "task_id": "task_grn_inference", + "category": "Scaling", + "name": "Worst score negative_control r2-theta-1.0", + "value": 0, + "severity": 0, + "severity_value": -0.0, + "code": "worst_score >= -1", + "message": "Method negative_control performs much worse than baselines.\n Task id: task_grn_inference\n Method id: negative_control\n Metric id: r2-theta-1.0\n Worst score: 0%\n" + }, + { + "task_id": "task_grn_inference", + "category": "Scaling", + "name": "Best score negative_control r2-theta-1.0", + "value": 0, + "severity": 0, + "severity_value": 0.0, + "code": "best_score <= 2", + "message": "Method negative_control performs a lot better than baselines.\n Task id: task_grn_inference\n Method id: negative_control\n Metric id: r2-theta-1.0\n Best score: 0%\n" + }, + { + "task_id": "task_grn_inference", + "category": "Scaling", + "name": "Worst score positive_control r2-theta-1.0", + "value": 0.0, + "severity": 0, + "severity_value": -0.0, + "code": "worst_score >= -1", + "message": "Method positive_control performs much worse than baselines.\n Task id: task_grn_inference\n Method id: positive_control\n Metric id: r2-theta-1.0\n Worst score: 0.0%\n" + }, + { + "task_id": "task_grn_inference", + "category": "Scaling", + "name": "Best score positive_control r2-theta-1.0", + "value": 1.0, + "severity": 0, + "severity_value": 0.5, + "code": "best_score <= 2", + "message": "Method positive_control performs a lot better than baselines.\n Task id: task_grn_inference\n Method id: positive_control\n Metric id: r2-theta-1.0\n Best score: 1.0%\n" + }, + { + "task_id": "task_grn_inference", + "category": "Scaling", + "name": "Worst score portia r2-theta-1.0", + "value": -0.1239, + "severity": 0, + "severity_value": 0.1239, + "code": "worst_score >= -1", + "message": "Method portia performs much worse than baselines.\n Task id: task_grn_inference\n Method id: portia\n Metric id: r2-theta-1.0\n Worst score: -0.1239%\n" + }, + { + "task_id": "task_grn_inference", + "category": "Scaling", + "name": "Best score portia r2-theta-1.0", + "value": 0.9038, + "severity": 0, + "severity_value": 0.4519, + "code": "best_score <= 2", + "message": "Method portia performs a lot better than baselines.\n Task id: task_grn_inference\n Method id: portia\n Metric id: r2-theta-1.0\n Best score: 0.9038%\n" + }, + { + "task_id": "task_grn_inference", + "category": "Scaling", + "name": "Worst score ppcor r2-theta-1.0", + "value": 0.0, + "severity": 0, + "severity_value": -0.0, + "code": "worst_score >= -1", + "message": "Method ppcor performs much worse than baselines.\n Task id: task_grn_inference\n Method id: ppcor\n Metric id: r2-theta-1.0\n Worst score: 0.0%\n" + }, + { + "task_id": "task_grn_inference", + "category": "Scaling", + "name": "Best score ppcor r2-theta-1.0", + "value": 0.569, + "severity": 0, + "severity_value": 0.2845, + "code": "best_score <= 2", + "message": "Method ppcor performs a lot better than baselines.\n Task id: task_grn_inference\n Method id: ppcor\n Metric id: r2-theta-1.0\n Best score: 0.569%\n" + }, + { + "task_id": 
"task_grn_inference", + "category": "Scaling", + "name": "Worst score scenic r2-theta-1.0", + "value": 0.0, + "severity": 0, + "severity_value": -0.0, + "code": "worst_score >= -1", + "message": "Method scenic performs much worse than baselines.\n Task id: task_grn_inference\n Method id: scenic\n Metric id: r2-theta-1.0\n Worst score: 0.0%\n" + }, + { + "task_id": "task_grn_inference", + "category": "Scaling", + "name": "Best score scenic r2-theta-1.0", + "value": 0.6673, + "severity": 0, + "severity_value": 0.33365, + "code": "best_score <= 2", + "message": "Method scenic performs a lot better than baselines.\n Task id: task_grn_inference\n Method id: scenic\n Metric id: r2-theta-1.0\n Best score: 0.6673%\n" + }, + { + "task_id": "task_grn_inference", + "category": "Scaling", + "name": "Worst score scenicplus r2-theta-1.0", + "value": 0.0, + "severity": 0, + "severity_value": -0.0, + "code": "worst_score >= -1", + "message": "Method scenicplus performs much worse than baselines.\n Task id: task_grn_inference\n Method id: scenicplus\n Metric id: r2-theta-1.0\n Worst score: 0.0%\n" + }, + { + "task_id": "task_grn_inference", + "category": "Scaling", + "name": "Best score scenicplus r2-theta-1.0", + "value": 1.0831, + "severity": 0, + "severity_value": 0.54155, + "code": "best_score <= 2", + "message": "Method scenicplus performs a lot better than baselines.\n Task id: task_grn_inference\n Method id: scenicplus\n Metric id: r2-theta-1.0\n Best score: 1.0831%\n" + }, + { + "task_id": "task_grn_inference", + "category": "Scaling", + "name": "Worst score scprint r2-theta-1.0", + "value": 0.0, + "severity": 0, + "severity_value": -0.0, + "code": "worst_score >= -1", + "message": "Method scprint performs much worse than baselines.\n Task id: task_grn_inference\n Method id: scprint\n Metric id: r2-theta-1.0\n Worst score: 0.0%\n" + }, + { + "task_id": "task_grn_inference", + "category": "Scaling", + "name": "Best score scprint r2-theta-1.0", + "value": 0.4843, + "severity": 0, + "severity_value": 0.24215, + "code": "best_score <= 2", + "message": "Method scprint performs a lot better than baselines.\n Task id: task_grn_inference\n Method id: scprint\n Metric id: r2-theta-1.0\n Best score: 0.4843%\n" + }, + { + "task_id": "task_grn_inference", + "category": "Scaling", + "name": "Worst score grnboost r2-theta-1.0", + "value": 0.0, + "severity": 0, + "severity_value": -0.0, + "code": "worst_score >= -1", + "message": "Method grnboost performs much worse than baselines.\n Task id: task_grn_inference\n Method id: grnboost\n Metric id: r2-theta-1.0\n Worst score: 0.0%\n" + }, + { + "task_id": "task_grn_inference", + "category": "Scaling", + "name": "Best score grnboost r2-theta-1.0", + "value": 1.8408, + "severity": 0, + "severity_value": 0.9204, + "code": "best_score <= 2", + "message": "Method grnboost performs a lot better than baselines.\n Task id: task_grn_inference\n Method id: grnboost\n Metric id: r2-theta-1.0\n Best score: 1.8408%\n" + }, + { + "task_id": "task_grn_inference", + "category": "Scaling", + "name": "Worst score scglue r2-theta-1.0", + "value": 0.0, + "severity": 0, + "severity_value": -0.0, + "code": "worst_score >= -1", + "message": "Method scglue performs much worse than baselines.\n Task id: task_grn_inference\n Method id: scglue\n Metric id: r2-theta-1.0\n Worst score: 0.0%\n" + }, + { + "task_id": "task_grn_inference", + "category": "Scaling", + "name": "Best score scglue r2-theta-1.0", + "value": 0.0631, + "severity": 0, + "severity_value": 0.03155, + "code": "best_score <= 
2", + "message": "Method scglue performs a lot better than baselines.\n Task id: task_grn_inference\n Method id: scglue\n Metric id: r2-theta-1.0\n Best score: 0.0631%\n" + }, + { + "task_id": "task_grn_inference", + "category": "Scaling", + "name": "Worst score granie r2-theta-1.0", + "value": -0.3049, + "severity": 0, + "severity_value": 0.3049, + "code": "worst_score >= -1", + "message": "Method granie performs much worse than baselines.\n Task id: task_grn_inference\n Method id: granie\n Metric id: r2-theta-1.0\n Worst score: -0.3049%\n" + }, + { + "task_id": "task_grn_inference", + "category": "Scaling", + "name": "Best score granie r2-theta-1.0", + "value": 0.0, + "severity": 0, + "severity_value": 0.0, + "code": "best_score <= 2", + "message": "Method granie performs a lot better than baselines.\n Task id: task_grn_inference\n Method id: granie\n Metric id: r2-theta-1.0\n Best score: 0.0%\n" + }, + { + "task_id": "task_grn_inference", + "category": "Scaling", + "name": "Worst score figr r2-theta-1.0", + "value": 0.0, + "severity": 0, + "severity_value": -0.0, + "code": "worst_score >= -1", + "message": "Method figr performs much worse than baselines.\n Task id: task_grn_inference\n Method id: figr\n Metric id: r2-theta-1.0\n Worst score: 0.0%\n" + }, + { + "task_id": "task_grn_inference", + "category": "Scaling", + "name": "Best score figr r2-theta-1.0", + "value": 0.7975, + "severity": 0, + "severity_value": 0.39875, + "code": "best_score <= 2", + "message": "Method figr performs a lot better than baselines.\n Task id: task_grn_inference\n Method id: figr\n Metric id: r2-theta-1.0\n Best score: 0.7975%\n" + }, + { + "task_id": "task_grn_inference", + "category": "Scaling", + "name": "Worst score celloracle r2-theta-1.0", + "value": 0.0, + "severity": 0, + "severity_value": -0.0, + "code": "worst_score >= -1", + "message": "Method celloracle performs much worse than baselines.\n Task id: task_grn_inference\n Method id: celloracle\n Metric id: r2-theta-1.0\n Worst score: 0.0%\n" + }, + { + "task_id": "task_grn_inference", + "category": "Scaling", + "name": "Best score celloracle r2-theta-1.0", + "value": 0.7524, + "severity": 0, + "severity_value": 0.3762, + "code": "best_score <= 2", + "message": "Method celloracle performs a lot better than baselines.\n Task id: task_grn_inference\n Method id: celloracle\n Metric id: r2-theta-1.0\n Best score: 0.7524%\n" + }, + { + "task_id": "task_grn_inference", + "category": "Scaling", + "name": "Worst score pearson_corr ws-theta-0.0", + "value": 0.0, + "severity": 0, + "severity_value": -0.0, + "code": "worst_score >= -1", + "message": "Method pearson_corr performs much worse than baselines.\n Task id: task_grn_inference\n Method id: pearson_corr\n Metric id: ws-theta-0.0\n Worst score: 0.0%\n" + }, + { + "task_id": "task_grn_inference", + "category": "Scaling", + "name": "Best score pearson_corr ws-theta-0.0", + "value": 1.0, + "severity": 0, + "severity_value": 0.5, + "code": "best_score <= 2", + "message": "Method pearson_corr performs a lot better than baselines.\n Task id: task_grn_inference\n Method id: pearson_corr\n Metric id: ws-theta-0.0\n Best score: 1.0%\n" + }, + { + "task_id": "task_grn_inference", + "category": "Scaling", + "name": "Worst score negative_control ws-theta-0.0", + "value": 0, + "severity": 0, + "severity_value": -0.0, + "code": "worst_score >= -1", + "message": "Method negative_control performs much worse than baselines.\n Task id: task_grn_inference\n Method id: negative_control\n Metric id: ws-theta-0.0\n Worst 
score: 0%\n" + }, + { + "task_id": "task_grn_inference", + "category": "Scaling", + "name": "Best score negative_control ws-theta-0.0", + "value": 0, + "severity": 0, + "severity_value": 0.0, + "code": "best_score <= 2", + "message": "Method negative_control performs a lot better than baselines.\n Task id: task_grn_inference\n Method id: negative_control\n Metric id: ws-theta-0.0\n Best score: 0%\n" + }, + { + "task_id": "task_grn_inference", + "category": "Scaling", + "name": "Worst score positive_control ws-theta-0.0", + "value": 0.0, + "severity": 0, + "severity_value": -0.0, + "code": "worst_score >= -1", + "message": "Method positive_control performs much worse than baselines.\n Task id: task_grn_inference\n Method id: positive_control\n Metric id: ws-theta-0.0\n Worst score: 0.0%\n" + }, + { + "task_id": "task_grn_inference", + "category": "Scaling", + "name": "Best score positive_control ws-theta-0.0", + "value": 1.0, + "severity": 0, + "severity_value": 0.5, + "code": "best_score <= 2", + "message": "Method positive_control performs a lot better than baselines.\n Task id: task_grn_inference\n Method id: positive_control\n Metric id: ws-theta-0.0\n Best score: 1.0%\n" + }, + { + "task_id": "task_grn_inference", + "category": "Scaling", + "name": "Worst score portia ws-theta-0.0", + "value": -0.0103, + "severity": 0, + "severity_value": 0.0103, + "code": "worst_score >= -1", + "message": "Method portia performs much worse than baselines.\n Task id: task_grn_inference\n Method id: portia\n Metric id: ws-theta-0.0\n Worst score: -0.0103%\n" + }, + { + "task_id": "task_grn_inference", + "category": "Scaling", + "name": "Best score portia ws-theta-0.0", + "value": 0.7341, + "severity": 0, + "severity_value": 0.36705, + "code": "best_score <= 2", + "message": "Method portia performs a lot better than baselines.\n Task id: task_grn_inference\n Method id: portia\n Metric id: ws-theta-0.0\n Best score: 0.7341%\n" + }, + { + "task_id": "task_grn_inference", + "category": "Scaling", + "name": "Worst score ppcor ws-theta-0.0", + "value": 0.0, + "severity": 0, + "severity_value": -0.0, + "code": "worst_score >= -1", + "message": "Method ppcor performs much worse than baselines.\n Task id: task_grn_inference\n Method id: ppcor\n Metric id: ws-theta-0.0\n Worst score: 0.0%\n" + }, + { + "task_id": "task_grn_inference", + "category": "Scaling", + "name": "Best score ppcor ws-theta-0.0", + "value": 0.4236, + "severity": 0, + "severity_value": 0.2118, + "code": "best_score <= 2", + "message": "Method ppcor performs a lot better than baselines.\n Task id: task_grn_inference\n Method id: ppcor\n Metric id: ws-theta-0.0\n Best score: 0.4236%\n" + }, + { + "task_id": "task_grn_inference", + "category": "Scaling", + "name": "Worst score scenic ws-theta-0.0", + "value": 0.0, + "severity": 0, + "severity_value": -0.0, + "code": "worst_score >= -1", + "message": "Method scenic performs much worse than baselines.\n Task id: task_grn_inference\n Method id: scenic\n Metric id: ws-theta-0.0\n Worst score: 0.0%\n" + }, + { + "task_id": "task_grn_inference", + "category": "Scaling", + "name": "Best score scenic ws-theta-0.0", + "value": 1.0108, + "severity": 0, + "severity_value": 0.5054, + "code": "best_score <= 2", + "message": "Method scenic performs a lot better than baselines.\n Task id: task_grn_inference\n Method id: scenic\n Metric id: ws-theta-0.0\n Best score: 1.0108%\n" + }, + { + "task_id": "task_grn_inference", + "category": "Scaling", + "name": "Worst score scenicplus ws-theta-0.0", + "value": 0, + 
"severity": 0, + "severity_value": -0.0, + "code": "worst_score >= -1", + "message": "Method scenicplus performs much worse than baselines.\n Task id: task_grn_inference\n Method id: scenicplus\n Metric id: ws-theta-0.0\n Worst score: 0%\n" + }, + { + "task_id": "task_grn_inference", + "category": "Scaling", + "name": "Best score scenicplus ws-theta-0.0", + "value": 0, + "severity": 0, + "severity_value": 0.0, + "code": "best_score <= 2", + "message": "Method scenicplus performs a lot better than baselines.\n Task id: task_grn_inference\n Method id: scenicplus\n Metric id: ws-theta-0.0\n Best score: 0%\n" + }, + { + "task_id": "task_grn_inference", + "category": "Scaling", + "name": "Worst score scprint ws-theta-0.0", + "value": 0.0, + "severity": 0, + "severity_value": -0.0, + "code": "worst_score >= -1", + "message": "Method scprint performs much worse than baselines.\n Task id: task_grn_inference\n Method id: scprint\n Metric id: ws-theta-0.0\n Worst score: 0.0%\n" + }, + { + "task_id": "task_grn_inference", + "category": "Scaling", + "name": "Best score scprint ws-theta-0.0", + "value": 0.6129, + "severity": 0, + "severity_value": 0.30645, + "code": "best_score <= 2", + "message": "Method scprint performs a lot better than baselines.\n Task id: task_grn_inference\n Method id: scprint\n Metric id: ws-theta-0.0\n Best score: 0.6129%\n" + }, + { + "task_id": "task_grn_inference", + "category": "Scaling", + "name": "Worst score grnboost ws-theta-0.0", + "value": 0.0, + "severity": 0, + "severity_value": -0.0, + "code": "worst_score >= -1", + "message": "Method grnboost performs much worse than baselines.\n Task id: task_grn_inference\n Method id: grnboost\n Metric id: ws-theta-0.0\n Worst score: 0.0%\n" + }, + { + "task_id": "task_grn_inference", + "category": "Scaling", + "name": "Best score grnboost ws-theta-0.0", + "value": 1.0332, + "severity": 0, + "severity_value": 0.5166, + "code": "best_score <= 2", + "message": "Method grnboost performs a lot better than baselines.\n Task id: task_grn_inference\n Method id: grnboost\n Metric id: ws-theta-0.0\n Best score: 1.0332%\n" + }, + { + "task_id": "task_grn_inference", + "category": "Scaling", + "name": "Worst score scglue ws-theta-0.0", + "value": 0, + "severity": 0, + "severity_value": -0.0, + "code": "worst_score >= -1", + "message": "Method scglue performs much worse than baselines.\n Task id: task_grn_inference\n Method id: scglue\n Metric id: ws-theta-0.0\n Worst score: 0%\n" + }, + { + "task_id": "task_grn_inference", + "category": "Scaling", + "name": "Best score scglue ws-theta-0.0", + "value": 0, + "severity": 0, + "severity_value": 0.0, + "code": "best_score <= 2", + "message": "Method scglue performs a lot better than baselines.\n Task id: task_grn_inference\n Method id: scglue\n Metric id: ws-theta-0.0\n Best score: 0%\n" + }, + { + "task_id": "task_grn_inference", + "category": "Scaling", + "name": "Worst score granie ws-theta-0.0", + "value": 0, + "severity": 0, + "severity_value": -0.0, + "code": "worst_score >= -1", + "message": "Method granie performs much worse than baselines.\n Task id: task_grn_inference\n Method id: granie\n Metric id: ws-theta-0.0\n Worst score: 0%\n" + }, + { + "task_id": "task_grn_inference", + "category": "Scaling", + "name": "Best score granie ws-theta-0.0", + "value": 0, + "severity": 0, + "severity_value": 0.0, + "code": "best_score <= 2", + "message": "Method granie performs a lot better than baselines.\n Task id: task_grn_inference\n Method id: granie\n Metric id: ws-theta-0.0\n Best score: 
0%\n" + }, + { + "task_id": "task_grn_inference", + "category": "Scaling", + "name": "Worst score figr ws-theta-0.0", + "value": 0, + "severity": 0, + "severity_value": -0.0, + "code": "worst_score >= -1", + "message": "Method figr performs much worse than baselines.\n Task id: task_grn_inference\n Method id: figr\n Metric id: ws-theta-0.0\n Worst score: 0%\n" + }, + { + "task_id": "task_grn_inference", + "category": "Scaling", + "name": "Best score figr ws-theta-0.0", + "value": 0, + "severity": 0, + "severity_value": 0.0, + "code": "best_score <= 2", + "message": "Method figr performs a lot better than baselines.\n Task id: task_grn_inference\n Method id: figr\n Metric id: ws-theta-0.0\n Best score: 0%\n" + }, + { + "task_id": "task_grn_inference", + "category": "Scaling", + "name": "Worst score celloracle ws-theta-0.0", + "value": 0, + "severity": 0, + "severity_value": -0.0, + "code": "worst_score >= -1", + "message": "Method celloracle performs much worse than baselines.\n Task id: task_grn_inference\n Method id: celloracle\n Metric id: ws-theta-0.0\n Worst score: 0%\n" + }, + { + "task_id": "task_grn_inference", + "category": "Scaling", + "name": "Best score celloracle ws-theta-0.0", + "value": 0, + "severity": 0, + "severity_value": 0.0, + "code": "best_score <= 2", + "message": "Method celloracle performs a lot better than baselines.\n Task id: task_grn_inference\n Method id: celloracle\n Metric id: ws-theta-0.0\n Best score: 0%\n" + }, + { + "task_id": "task_grn_inference", + "category": "Scaling", + "name": "Worst score pearson_corr ws-theta-0.5", + "value": 0.0, + "severity": 0, + "severity_value": -0.0, + "code": "worst_score >= -1", + "message": "Method pearson_corr performs much worse than baselines.\n Task id: task_grn_inference\n Method id: pearson_corr\n Metric id: ws-theta-0.5\n Worst score: 0.0%\n" + }, + { + "task_id": "task_grn_inference", + "category": "Scaling", + "name": "Best score pearson_corr ws-theta-0.5", + "value": 1.0, + "severity": 0, + "severity_value": 0.5, + "code": "best_score <= 2", + "message": "Method pearson_corr performs a lot better than baselines.\n Task id: task_grn_inference\n Method id: pearson_corr\n Metric id: ws-theta-0.5\n Best score: 1.0%\n" + }, + { + "task_id": "task_grn_inference", + "category": "Scaling", + "name": "Worst score negative_control ws-theta-0.5", + "value": 0, + "severity": 0, + "severity_value": -0.0, + "code": "worst_score >= -1", + "message": "Method negative_control performs much worse than baselines.\n Task id: task_grn_inference\n Method id: negative_control\n Metric id: ws-theta-0.5\n Worst score: 0%\n" + }, + { + "task_id": "task_grn_inference", + "category": "Scaling", + "name": "Best score negative_control ws-theta-0.5", + "value": 0, + "severity": 0, + "severity_value": 0.0, + "code": "best_score <= 2", + "message": "Method negative_control performs a lot better than baselines.\n Task id: task_grn_inference\n Method id: negative_control\n Metric id: ws-theta-0.5\n Best score: 0%\n" + }, + { + "task_id": "task_grn_inference", + "category": "Scaling", + "name": "Worst score positive_control ws-theta-0.5", + "value": 0.0, + "severity": 0, + "severity_value": -0.0, + "code": "worst_score >= -1", + "message": "Method positive_control performs much worse than baselines.\n Task id: task_grn_inference\n Method id: positive_control\n Metric id: ws-theta-0.5\n Worst score: 0.0%\n" + }, + { + "task_id": "task_grn_inference", + "category": "Scaling", + "name": "Best score positive_control ws-theta-0.5", + "value": 1.0, + 
"severity": 0, + "severity_value": 0.5, + "code": "best_score <= 2", + "message": "Method positive_control performs a lot better than baselines.\n Task id: task_grn_inference\n Method id: positive_control\n Metric id: ws-theta-0.5\n Best score: 1.0%\n" + }, + { + "task_id": "task_grn_inference", + "category": "Scaling", + "name": "Worst score portia ws-theta-0.5", + "value": 0.0, + "severity": 0, + "severity_value": -0.0, + "code": "worst_score >= -1", + "message": "Method portia performs much worse than baselines.\n Task id: task_grn_inference\n Method id: portia\n Metric id: ws-theta-0.5\n Worst score: 0.0%\n" + }, + { + "task_id": "task_grn_inference", + "category": "Scaling", + "name": "Best score portia ws-theta-0.5", + "value": 0.5985, + "severity": 0, + "severity_value": 0.29925, + "code": "best_score <= 2", + "message": "Method portia performs a lot better than baselines.\n Task id: task_grn_inference\n Method id: portia\n Metric id: ws-theta-0.5\n Best score: 0.5985%\n" + }, + { + "task_id": "task_grn_inference", + "category": "Scaling", + "name": "Worst score ppcor ws-theta-0.5", + "value": 0.0, + "severity": 0, + "severity_value": -0.0, + "code": "worst_score >= -1", + "message": "Method ppcor performs much worse than baselines.\n Task id: task_grn_inference\n Method id: ppcor\n Metric id: ws-theta-0.5\n Worst score: 0.0%\n" + }, + { + "task_id": "task_grn_inference", + "category": "Scaling", + "name": "Best score ppcor ws-theta-0.5", + "value": 0.2438, + "severity": 0, + "severity_value": 0.1219, + "code": "best_score <= 2", + "message": "Method ppcor performs a lot better than baselines.\n Task id: task_grn_inference\n Method id: ppcor\n Metric id: ws-theta-0.5\n Best score: 0.2438%\n" + }, + { + "task_id": "task_grn_inference", + "category": "Scaling", + "name": "Worst score scenic ws-theta-0.5", + "value": 0.0, + "severity": 0, + "severity_value": -0.0, + "code": "worst_score >= -1", + "message": "Method scenic performs much worse than baselines.\n Task id: task_grn_inference\n Method id: scenic\n Metric id: ws-theta-0.5\n Worst score: 0.0%\n" + }, + { + "task_id": "task_grn_inference", + "category": "Scaling", + "name": "Best score scenic ws-theta-0.5", + "value": 0.4707, + "severity": 0, + "severity_value": 0.23535, + "code": "best_score <= 2", + "message": "Method scenic performs a lot better than baselines.\n Task id: task_grn_inference\n Method id: scenic\n Metric id: ws-theta-0.5\n Best score: 0.4707%\n" + }, + { + "task_id": "task_grn_inference", + "category": "Scaling", + "name": "Worst score scenicplus ws-theta-0.5", + "value": 0, + "severity": 0, + "severity_value": -0.0, + "code": "worst_score >= -1", + "message": "Method scenicplus performs much worse than baselines.\n Task id: task_grn_inference\n Method id: scenicplus\n Metric id: ws-theta-0.5\n Worst score: 0%\n" + }, + { + "task_id": "task_grn_inference", + "category": "Scaling", + "name": "Best score scenicplus ws-theta-0.5", + "value": 0, + "severity": 0, + "severity_value": 0.0, + "code": "best_score <= 2", + "message": "Method scenicplus performs a lot better than baselines.\n Task id: task_grn_inference\n Method id: scenicplus\n Metric id: ws-theta-0.5\n Best score: 0%\n" + }, + { + "task_id": "task_grn_inference", + "category": "Scaling", + "name": "Worst score scprint ws-theta-0.5", + "value": 0.0, + "severity": 0, + "severity_value": -0.0, + "code": "worst_score >= -1", + "message": "Method scprint performs much worse than baselines.\n Task id: task_grn_inference\n Method id: scprint\n Metric id: 
ws-theta-0.5\n Worst score: 0.0%\n" + }, + { + "task_id": "task_grn_inference", + "category": "Scaling", + "name": "Best score scprint ws-theta-0.5", + "value": 0.8893, + "severity": 0, + "severity_value": 0.44465, + "code": "best_score <= 2", + "message": "Method scprint performs a lot better than baselines.\n Task id: task_grn_inference\n Method id: scprint\n Metric id: ws-theta-0.5\n Best score: 0.8893%\n" + }, + { + "task_id": "task_grn_inference", + "category": "Scaling", + "name": "Worst score grnboost ws-theta-0.5", + "value": 0.0, + "severity": 0, + "severity_value": -0.0, + "code": "worst_score >= -1", + "message": "Method grnboost performs much worse than baselines.\n Task id: task_grn_inference\n Method id: grnboost\n Metric id: ws-theta-0.5\n Worst score: 0.0%\n" + }, + { + "task_id": "task_grn_inference", + "category": "Scaling", + "name": "Best score grnboost ws-theta-0.5", + "value": 1.2617, + "severity": 0, + "severity_value": 0.63085, + "code": "best_score <= 2", + "message": "Method grnboost performs a lot better than baselines.\n Task id: task_grn_inference\n Method id: grnboost\n Metric id: ws-theta-0.5\n Best score: 1.2617%\n" + }, + { + "task_id": "task_grn_inference", + "category": "Scaling", + "name": "Worst score scglue ws-theta-0.5", + "value": 0, + "severity": 0, + "severity_value": -0.0, + "code": "worst_score >= -1", + "message": "Method scglue performs much worse than baselines.\n Task id: task_grn_inference\n Method id: scglue\n Metric id: ws-theta-0.5\n Worst score: 0%\n" + }, + { + "task_id": "task_grn_inference", + "category": "Scaling", + "name": "Best score scglue ws-theta-0.5", + "value": 0, + "severity": 0, + "severity_value": 0.0, + "code": "best_score <= 2", + "message": "Method scglue performs a lot better than baselines.\n Task id: task_grn_inference\n Method id: scglue\n Metric id: ws-theta-0.5\n Best score: 0%\n" + }, + { + "task_id": "task_grn_inference", + "category": "Scaling", + "name": "Worst score granie ws-theta-0.5", + "value": 0, + "severity": 0, + "severity_value": -0.0, + "code": "worst_score >= -1", + "message": "Method granie performs much worse than baselines.\n Task id: task_grn_inference\n Method id: granie\n Metric id: ws-theta-0.5\n Worst score: 0%\n" + }, + { + "task_id": "task_grn_inference", + "category": "Scaling", + "name": "Best score granie ws-theta-0.5", + "value": 0, + "severity": 0, + "severity_value": 0.0, + "code": "best_score <= 2", + "message": "Method granie performs a lot better than baselines.\n Task id: task_grn_inference\n Method id: granie\n Metric id: ws-theta-0.5\n Best score: 0%\n" + }, + { + "task_id": "task_grn_inference", + "category": "Scaling", + "name": "Worst score figr ws-theta-0.5", + "value": 0, + "severity": 0, + "severity_value": -0.0, + "code": "worst_score >= -1", + "message": "Method figr performs much worse than baselines.\n Task id: task_grn_inference\n Method id: figr\n Metric id: ws-theta-0.5\n Worst score: 0%\n" + }, + { + "task_id": "task_grn_inference", + "category": "Scaling", + "name": "Best score figr ws-theta-0.5", + "value": 0, + "severity": 0, + "severity_value": 0.0, + "code": "best_score <= 2", + "message": "Method figr performs a lot better than baselines.\n Task id: task_grn_inference\n Method id: figr\n Metric id: ws-theta-0.5\n Best score: 0%\n" + }, + { + "task_id": "task_grn_inference", + "category": "Scaling", + "name": "Worst score celloracle ws-theta-0.5", + "value": 0, + "severity": 0, + "severity_value": -0.0, + "code": "worst_score >= -1", + "message": "Method 
celloracle performs much worse than baselines.\n Task id: task_grn_inference\n Method id: celloracle\n Metric id: ws-theta-0.5\n Worst score: 0%\n" + }, + { + "task_id": "task_grn_inference", + "category": "Scaling", + "name": "Best score celloracle ws-theta-0.5", + "value": 0, + "severity": 0, + "severity_value": 0.0, + "code": "best_score <= 2", + "message": "Method celloracle performs a lot better than baselines.\n Task id: task_grn_inference\n Method id: celloracle\n Metric id: ws-theta-0.5\n Best score: 0%\n" + }, + { + "task_id": "task_grn_inference", + "category": "Scaling", + "name": "Worst score pearson_corr ws-theta-1.0", + "value": 0.0, + "severity": 0, + "severity_value": -0.0, + "code": "worst_score >= -1", + "message": "Method pearson_corr performs much worse than baselines.\n Task id: task_grn_inference\n Method id: pearson_corr\n Metric id: ws-theta-1.0\n Worst score: 0.0%\n" + }, + { + "task_id": "task_grn_inference", + "category": "Scaling", + "name": "Best score pearson_corr ws-theta-1.0", + "value": 1.0, + "severity": 0, + "severity_value": 0.5, + "code": "best_score <= 2", + "message": "Method pearson_corr performs a lot better than baselines.\n Task id: task_grn_inference\n Method id: pearson_corr\n Metric id: ws-theta-1.0\n Best score: 1.0%\n" + }, + { + "task_id": "task_grn_inference", + "category": "Scaling", + "name": "Worst score negative_control ws-theta-1.0", + "value": 0, + "severity": 0, + "severity_value": -0.0, + "code": "worst_score >= -1", + "message": "Method negative_control performs much worse than baselines.\n Task id: task_grn_inference\n Method id: negative_control\n Metric id: ws-theta-1.0\n Worst score: 0%\n" + }, + { + "task_id": "task_grn_inference", + "category": "Scaling", + "name": "Best score negative_control ws-theta-1.0", + "value": 0, + "severity": 0, + "severity_value": 0.0, + "code": "best_score <= 2", + "message": "Method negative_control performs a lot better than baselines.\n Task id: task_grn_inference\n Method id: negative_control\n Metric id: ws-theta-1.0\n Best score: 0%\n" + }, + { + "task_id": "task_grn_inference", + "category": "Scaling", + "name": "Worst score positive_control ws-theta-1.0", + "value": 0.0, + "severity": 0, + "severity_value": -0.0, + "code": "worst_score >= -1", + "message": "Method positive_control performs much worse than baselines.\n Task id: task_grn_inference\n Method id: positive_control\n Metric id: ws-theta-1.0\n Worst score: 0.0%\n" + }, + { + "task_id": "task_grn_inference", + "category": "Scaling", + "name": "Best score positive_control ws-theta-1.0", + "value": 1.0, + "severity": 0, + "severity_value": 0.5, + "code": "best_score <= 2", + "message": "Method positive_control performs a lot better than baselines.\n Task id: task_grn_inference\n Method id: positive_control\n Metric id: ws-theta-1.0\n Best score: 1.0%\n" + }, + { + "task_id": "task_grn_inference", + "category": "Scaling", + "name": "Worst score portia ws-theta-1.0", + "value": 0.0, + "severity": 0, + "severity_value": -0.0, + "code": "worst_score >= -1", + "message": "Method portia performs much worse than baselines.\n Task id: task_grn_inference\n Method id: portia\n Metric id: ws-theta-1.0\n Worst score: 0.0%\n" + }, + { + "task_id": "task_grn_inference", + "category": "Scaling", + "name": "Best score portia ws-theta-1.0", + "value": 0.2897, + "severity": 0, + "severity_value": 0.14485, + "code": "best_score <= 2", + "message": "Method portia performs a lot better than baselines.\n Task id: task_grn_inference\n Method id: portia\n 
Metric id: ws-theta-1.0\n Best score: 0.2897%\n" + }, + { + "task_id": "task_grn_inference", + "category": "Scaling", + "name": "Worst score ppcor ws-theta-1.0", + "value": 0.0, + "severity": 0, + "severity_value": -0.0, + "code": "worst_score >= -1", + "message": "Method ppcor performs much worse than baselines.\n Task id: task_grn_inference\n Method id: ppcor\n Metric id: ws-theta-1.0\n Worst score: 0.0%\n" + }, + { + "task_id": "task_grn_inference", + "category": "Scaling", + "name": "Best score ppcor ws-theta-1.0", + "value": 0.2795, + "severity": 0, + "severity_value": 0.13975, + "code": "best_score <= 2", + "message": "Method ppcor performs a lot better than baselines.\n Task id: task_grn_inference\n Method id: ppcor\n Metric id: ws-theta-1.0\n Best score: 0.2795%\n" + }, + { + "task_id": "task_grn_inference", + "category": "Scaling", + "name": "Worst score scenic ws-theta-1.0", + "value": -0.0291, + "severity": 0, + "severity_value": 0.0291, + "code": "worst_score >= -1", + "message": "Method scenic performs much worse than baselines.\n Task id: task_grn_inference\n Method id: scenic\n Metric id: ws-theta-1.0\n Worst score: -0.0291%\n" + }, + { + "task_id": "task_grn_inference", + "category": "Scaling", + "name": "Best score scenic ws-theta-1.0", + "value": 0.2806, + "severity": 0, + "severity_value": 0.1403, + "code": "best_score <= 2", + "message": "Method scenic performs a lot better than baselines.\n Task id: task_grn_inference\n Method id: scenic\n Metric id: ws-theta-1.0\n Best score: 0.2806%\n" + }, + { + "task_id": "task_grn_inference", + "category": "Scaling", + "name": "Worst score scenicplus ws-theta-1.0", + "value": 0, + "severity": 0, + "severity_value": -0.0, + "code": "worst_score >= -1", + "message": "Method scenicplus performs much worse than baselines.\n Task id: task_grn_inference\n Method id: scenicplus\n Metric id: ws-theta-1.0\n Worst score: 0%\n" + }, + { + "task_id": "task_grn_inference", + "category": "Scaling", + "name": "Best score scenicplus ws-theta-1.0", + "value": 0, + "severity": 0, + "severity_value": 0.0, + "code": "best_score <= 2", + "message": "Method scenicplus performs a lot better than baselines.\n Task id: task_grn_inference\n Method id: scenicplus\n Metric id: ws-theta-1.0\n Best score: 0%\n" + }, + { + "task_id": "task_grn_inference", + "category": "Scaling", + "name": "Worst score scprint ws-theta-1.0", + "value": 0.0, + "severity": 0, + "severity_value": -0.0, + "code": "worst_score >= -1", + "message": "Method scprint performs much worse than baselines.\n Task id: task_grn_inference\n Method id: scprint\n Metric id: ws-theta-1.0\n Worst score: 0.0%\n" + }, + { + "task_id": "task_grn_inference", + "category": "Scaling", + "name": "Best score scprint ws-theta-1.0", + "value": 0.9468, + "severity": 0, + "severity_value": 0.4734, + "code": "best_score <= 2", + "message": "Method scprint performs a lot better than baselines.\n Task id: task_grn_inference\n Method id: scprint\n Metric id: ws-theta-1.0\n Best score: 0.9468%\n" + }, + { + "task_id": "task_grn_inference", + "category": "Scaling", + "name": "Worst score grnboost ws-theta-1.0", + "value": 0.0, + "severity": 0, + "severity_value": -0.0, + "code": "worst_score >= -1", + "message": "Method grnboost performs much worse than baselines.\n Task id: task_grn_inference\n Method id: grnboost\n Metric id: ws-theta-1.0\n Worst score: 0.0%\n" + }, + { + "task_id": "task_grn_inference", + "category": "Scaling", + "name": "Best score grnboost ws-theta-1.0", + "value": 1.1635, + "severity": 0, + 
"severity_value": 0.58175, + "code": "best_score <= 2", + "message": "Method grnboost performs a lot better than baselines.\n Task id: task_grn_inference\n Method id: grnboost\n Metric id: ws-theta-1.0\n Best score: 1.1635%\n" + }, + { + "task_id": "task_grn_inference", + "category": "Scaling", + "name": "Worst score scglue ws-theta-1.0", + "value": 0, + "severity": 0, + "severity_value": -0.0, + "code": "worst_score >= -1", + "message": "Method scglue performs much worse than baselines.\n Task id: task_grn_inference\n Method id: scglue\n Metric id: ws-theta-1.0\n Worst score: 0%\n" + }, + { + "task_id": "task_grn_inference", + "category": "Scaling", + "name": "Best score scglue ws-theta-1.0", + "value": 0, + "severity": 0, + "severity_value": 0.0, + "code": "best_score <= 2", + "message": "Method scglue performs a lot better than baselines.\n Task id: task_grn_inference\n Method id: scglue\n Metric id: ws-theta-1.0\n Best score: 0%\n" + }, + { + "task_id": "task_grn_inference", + "category": "Scaling", + "name": "Worst score granie ws-theta-1.0", + "value": 0, + "severity": 0, + "severity_value": -0.0, + "code": "worst_score >= -1", + "message": "Method granie performs much worse than baselines.\n Task id: task_grn_inference\n Method id: granie\n Metric id: ws-theta-1.0\n Worst score: 0%\n" + }, + { + "task_id": "task_grn_inference", + "category": "Scaling", + "name": "Best score granie ws-theta-1.0", + "value": 0, + "severity": 0, + "severity_value": 0.0, + "code": "best_score <= 2", + "message": "Method granie performs a lot better than baselines.\n Task id: task_grn_inference\n Method id: granie\n Metric id: ws-theta-1.0\n Best score: 0%\n" + }, + { + "task_id": "task_grn_inference", + "category": "Scaling", + "name": "Worst score figr ws-theta-1.0", + "value": 0, + "severity": 0, + "severity_value": -0.0, + "code": "worst_score >= -1", + "message": "Method figr performs much worse than baselines.\n Task id: task_grn_inference\n Method id: figr\n Metric id: ws-theta-1.0\n Worst score: 0%\n" + }, + { + "task_id": "task_grn_inference", + "category": "Scaling", + "name": "Best score figr ws-theta-1.0", + "value": 0, + "severity": 0, + "severity_value": 0.0, + "code": "best_score <= 2", + "message": "Method figr performs a lot better than baselines.\n Task id: task_grn_inference\n Method id: figr\n Metric id: ws-theta-1.0\n Best score: 0%\n" + }, + { + "task_id": "task_grn_inference", + "category": "Scaling", + "name": "Worst score celloracle ws-theta-1.0", + "value": 0, + "severity": 0, + "severity_value": -0.0, + "code": "worst_score >= -1", + "message": "Method celloracle performs much worse than baselines.\n Task id: task_grn_inference\n Method id: celloracle\n Metric id: ws-theta-1.0\n Worst score: 0%\n" + }, + { + "task_id": "task_grn_inference", + "category": "Scaling", + "name": "Best score celloracle ws-theta-1.0", + "value": 0, + "severity": 0, + "severity_value": 0.0, + "code": "best_score <= 2", + "message": "Method celloracle performs a lot better than baselines.\n Task id: task_grn_inference\n Method id: celloracle\n Metric id: ws-theta-1.0\n Best score: 0%\n" + } +] \ No newline at end of file diff --git a/results/grn/data/results.json b/results/grn/data/results.json new file mode 100644 index 00000000..93d58640 --- /dev/null +++ b/results/grn/data/results.json @@ -0,0 +1,2134 @@ +[ + { + "dataset_id": "adamson", + "method_id": "celloracle", + "metric_values": { + "r1_all": "NA", + "r1_grn": "NA", + "r2-theta-0.0": "NA", + "r2-theta-0.5": "NA", + "r2-theta-1.0": "NA", + 
"ws-theta-0.0": "NA", + "ws-theta-0.5": "NA", + "ws-theta-1.0": "NA" + }, + "scaled_scores": { + "r1_all": 0, + "r1_grn": 0, + "r2-theta-0.0": 0, + "r2-theta-0.5": 0, + "r2-theta-1.0": 0, + "ws-theta-0.0": 0, + "ws-theta-0.5": 0, + "ws-theta-1.0": 0 + }, + "mean_score": 0, + "resources": {} + }, + { + "dataset_id": "adamson", + "method_id": "figr", + "metric_values": { + "r1_all": "NA", + "r1_grn": "NA", + "r2-theta-0.0": "NA", + "r2-theta-0.5": "NA", + "r2-theta-1.0": "NA", + "ws-theta-0.0": "NA", + "ws-theta-0.5": "NA", + "ws-theta-1.0": "NA" + }, + "scaled_scores": { + "r1_all": 0, + "r1_grn": 0, + "r2-theta-0.0": 0, + "r2-theta-0.5": 0, + "r2-theta-1.0": 0, + "ws-theta-0.0": 0, + "ws-theta-0.5": 0, + "ws-theta-1.0": 0 + }, + "mean_score": 0, + "resources": {} + }, + { + "dataset_id": "adamson", + "method_id": "granie", + "metric_values": { + "r1_all": "NA", + "r1_grn": "NA", + "r2-theta-0.0": "NA", + "r2-theta-0.5": "NA", + "r2-theta-1.0": "NA", + "ws-theta-0.0": "NA", + "ws-theta-0.5": "NA", + "ws-theta-1.0": "NA" + }, + "scaled_scores": { + "r1_all": 0, + "r1_grn": 0, + "r2-theta-0.0": 0, + "r2-theta-0.5": 0, + "r2-theta-1.0": 0, + "ws-theta-0.0": 0, + "ws-theta-0.5": 0, + "ws-theta-1.0": 0 + }, + "mean_score": 0, + "resources": {} + }, + { + "dataset_id": "adamson", + "method_id": "grnboost", + "metric_values": { + "r1_all": 0.0081, + "r1_grn": 0.013, + "r2-theta-0.0": 0.7089, + "r2-theta-0.5": 0.688, + "r2-theta-1.0": 0.4918, + "ws-theta-0.0": 0.8857, + "ws-theta-0.5": 0.8327, + "ws-theta-1.0": 0.7337 + }, + "scaled_scores": { + "r1_all": 1.7614, + "r1_grn": 1.116, + "r2-theta-0.0": 0.8963, + "r2-theta-0.5": 1.4339, + "r2-theta-1.0": 1.8408, + "ws-theta-0.0": 1.0332, + "ws-theta-0.5": 1.188, + "ws-theta-1.0": 1.1045 + }, + "mean_score": 0.987, + "resources": {} + }, + { + "dataset_id": "adamson", + "method_id": "negative_control", + "metric_values": { + "r1_all": 0.0004, + "r1_grn": 0.0004, + "r2-theta-0.0": 0.4987, + "r2-theta-0.5": 0.5883, + "r2-theta-1.0": 0.451, + "ws-theta-0.0": 0.4716, + "ws-theta-0.5": 0.5034, + "ws-theta-1.0": 0.5057 + }, + "scaled_scores": { + "r1_all": 0, + "r1_grn": 0, + "r2-theta-0.0": 0, + "r2-theta-0.5": 0, + "r2-theta-1.0": 0, + "ws-theta-0.0": 0, + "ws-theta-0.5": 0, + "ws-theta-1.0": 0 + }, + "mean_score": 0, + "resources": {} + }, + { + "dataset_id": "adamson", + "method_id": "pearson_corr", + "metric_values": { + "r1_all": 0.0028, + "r1_grn": 0.0047, + "r2-theta-0.0": 0.6806, + "r2-theta-0.5": 0.6513, + "r2-theta-1.0": 0.4732, + "ws-theta-0.0": 0.8651, + "ws-theta-0.5": 0.7806, + "ws-theta-1.0": 0.7121 + }, + "scaled_scores": { + "r1_all": 0.5437, + "r1_grn": 0.377, + "r2-theta-0.0": 0.7758, + "r2-theta-0.5": 0.907, + "r2-theta-1.0": 1, + "ws-theta-0.0": 0.9818, + "ws-theta-0.5": 1, + "ws-theta-1.0": 1 + }, + "mean_score": 0.8232, + "resources": {} + }, + { + "dataset_id": "adamson", + "method_id": "portia", + "metric_values": { + "r1_all": 0.0203, + "r1_grn": 0.021, + "r2-theta-0.0": 0.438, + "r2-theta-0.5": 0.5535, + "r2-theta-1.0": 0.4482, + "ws-theta-0.0": 0.7658, + "ws-theta-0.5": 0.6693, + "ws-theta-1.0": 0.5655 + }, + "scaled_scores": { + "r1_all": 4.5733, + "r1_grn": 1.8316, + "r2-theta-0.0": -0.2592, + "r2-theta-0.5": -0.4999, + "r2-theta-1.0": -0.1239, + "ws-theta-0.0": 0.7341, + "ws-theta-0.5": 0.5985, + "ws-theta-1.0": 0.2897 + }, + "mean_score": 0.4528, + "resources": {} + }, + { + "dataset_id": "adamson", + "method_id": "positive_control", + "metric_values": { + "r1_all": 0.0048, + "r1_grn": 0.0117, + "r2-theta-0.0": 0.7332, + 
"r2-theta-0.5": 0.6578, + "r2-theta-1.0": 0.4676, + "ws-theta-0.0": 0.8724, + "ws-theta-0.5": 0.7354, + "ws-theta-1.0": 0.6838 + }, + "scaled_scores": { + "r1_all": 1, + "r1_grn": 1, + "r2-theta-0.0": 1, + "r2-theta-0.5": 1, + "r2-theta-1.0": 0.7515, + "ws-theta-0.0": 1, + "ws-theta-0.5": 0.8368, + "ws-theta-1.0": 0.863 + }, + "mean_score": 0.9314, + "resources": {} + }, + { + "dataset_id": "adamson", + "method_id": "ppcor", + "metric_values": { + "r1_all": 0.0245, + "r1_grn": 0.0253, + "r2-theta-0.0": 0.5745, + "r2-theta-0.5": 0.6244, + "r2-theta-1.0": 0.4636, + "ws-theta-0.0": 0.6414, + "ws-theta-0.5": 0.559, + "ws-theta-1.0": 0.5295 + }, + "scaled_scores": { + "r1_all": 5.5476, + "r1_grn": 2.2129, + "r2-theta-0.0": 0.3232, + "r2-theta-0.5": 0.52, + "r2-theta-1.0": 0.569, + "ws-theta-0.0": 0.4236, + "ws-theta-0.5": 0.2003, + "ws-theta-1.0": 0.1154 + }, + "mean_score": 0.5189, + "resources": {} + }, + { + "dataset_id": "adamson", + "method_id": "scenic", + "metric_values": { + "r1_all": -0.002, + "r1_grn": -0.0051, + "r2-theta-0.0": 0.6259, + "r2-theta-0.5": 0.6187, + "r2-theta-1.0": 0.4574, + "ws-theta-0.0": 0.8768, + "ws-theta-0.5": 0.6339, + "ws-theta-1.0": 0.4997 + }, + "scaled_scores": { + "r1_all": -0.5592, + "r1_grn": -0.4906, + "r2-theta-0.0": 0.5423, + "r2-theta-0.5": 0.4372, + "r2-theta-1.0": 0.291, + "ws-theta-0.0": 1.0108, + "ws-theta-0.5": 0.4707, + "ws-theta-1.0": -0.0291 + }, + "mean_score": 0.3426, + "resources": {} + }, + { + "dataset_id": "adamson", + "method_id": "scenicplus", + "metric_values": { + "r1_all": "NA", + "r1_grn": "NA", + "r2-theta-0.0": "NA", + "r2-theta-0.5": "NA", + "r2-theta-1.0": "NA", + "ws-theta-0.0": "NA", + "ws-theta-0.5": "NA", + "ws-theta-1.0": "NA" + }, + "scaled_scores": { + "r1_all": 0, + "r1_grn": 0, + "r2-theta-0.0": 0, + "r2-theta-0.5": 0, + "r2-theta-1.0": 0, + "ws-theta-0.0": 0, + "ws-theta-0.5": 0, + "ws-theta-1.0": 0 + }, + "mean_score": 0, + "resources": {} + }, + { + "dataset_id": "adamson", + "method_id": "scglue", + "metric_values": { + "r1_all": "NA", + "r1_grn": "NA", + "r2-theta-0.0": "NA", + "r2-theta-0.5": "NA", + "r2-theta-1.0": "NA", + "ws-theta-0.0": "NA", + "ws-theta-0.5": "NA", + "ws-theta-1.0": "NA" + }, + "scaled_scores": { + "r1_all": 0, + "r1_grn": 0, + "r2-theta-0.0": 0, + "r2-theta-0.5": 0, + "r2-theta-1.0": 0, + "ws-theta-0.0": 0, + "ws-theta-0.5": 0, + "ws-theta-1.0": 0 + }, + "mean_score": 0, + "resources": {} + }, + { + "dataset_id": "adamson", + "method_id": "scprint", + "metric_values": { + "r1_all": "NA", + "r1_grn": "NA", + "r2-theta-0.0": "NA", + "r2-theta-0.5": "NA", + "r2-theta-1.0": "NA", + "ws-theta-0.0": "NA", + "ws-theta-0.5": "NA", + "ws-theta-1.0": "NA" + }, + "scaled_scores": { + "r1_all": 0, + "r1_grn": 0, + "r2-theta-0.0": 0, + "r2-theta-0.5": 0, + "r2-theta-1.0": 0, + "ws-theta-0.0": 0, + "ws-theta-0.5": 0, + "ws-theta-1.0": 0 + }, + "mean_score": 0, + "resources": {} + }, + { + "dataset_id": "nakatake", + "method_id": "celloracle", + "metric_values": { + "r1_all": "NA", + "r1_grn": "NA", + "r2-theta-0.0": "NA", + "r2-theta-0.5": "NA", + "r2-theta-1.0": "NA", + "ws-theta-0.0": "NA", + "ws-theta-0.5": "NA", + "ws-theta-1.0": "NA" + }, + "scaled_scores": { + "r1_all": 0, + "r1_grn": 0, + "r2-theta-0.0": 0, + "r2-theta-0.5": 0, + "r2-theta-1.0": 0, + "ws-theta-0.0": 0, + "ws-theta-0.5": 0, + "ws-theta-1.0": 0 + }, + "mean_score": 0, + "resources": {} + }, + { + "dataset_id": "nakatake", + "method_id": "figr", + "metric_values": { + "r1_all": "NA", + "r1_grn": "NA", + "r2-theta-0.0": "NA", + 
"r2-theta-0.5": "NA", + "r2-theta-1.0": "NA", + "ws-theta-0.0": "NA", + "ws-theta-0.5": "NA", + "ws-theta-1.0": "NA" + }, + "scaled_scores": { + "r1_all": 0, + "r1_grn": 0, + "r2-theta-0.0": 0, + "r2-theta-0.5": 0, + "r2-theta-1.0": 0, + "ws-theta-0.0": 0, + "ws-theta-0.5": 0, + "ws-theta-1.0": 0 + }, + "mean_score": 0, + "resources": {} + }, + { + "dataset_id": "nakatake", + "method_id": "granie", + "metric_values": { + "r1_all": "NA", + "r1_grn": "NA", + "r2-theta-0.0": "NA", + "r2-theta-0.5": "NA", + "r2-theta-1.0": "NA", + "ws-theta-0.0": "NA", + "ws-theta-0.5": "NA", + "ws-theta-1.0": "NA" + }, + "scaled_scores": { + "r1_all": 0, + "r1_grn": 0, + "r2-theta-0.0": 0, + "r2-theta-0.5": 0, + "r2-theta-1.0": 0, + "ws-theta-0.0": 0, + "ws-theta-0.5": 0, + "ws-theta-1.0": 0 + }, + "mean_score": 0, + "resources": {} + }, + { + "dataset_id": "nakatake", + "method_id": "grnboost", + "metric_values": { + "r1_all": -0.0101, + "r1_grn": -0.0172, + "r2-theta-0.0": 0.0217, + "r2-theta-0.5": 0.1303, + "r2-theta-1.0": 0.1156, + "ws-theta-0.0": "NA", + "ws-theta-0.5": "NA", + "ws-theta-1.0": "NA" + }, + "scaled_scores": { + "r1_all": -1.3461, + "r1_grn": -0.5688, + "r2-theta-0.0": 0.4481, + "r2-theta-0.5": 0.9233, + "r2-theta-1.0": 1.3136, + "ws-theta-0.0": 0, + "ws-theta-0.5": 0, + "ws-theta-1.0": 0 + }, + "mean_score": 0.2964, + "resources": {} + }, + { + "dataset_id": "nakatake", + "method_id": "negative_control", + "metric_values": { + "r1_all": 0.0017, + "r1_grn": 0.0019, + "r2-theta-0.0": 0.003, + "r2-theta-0.5": 0.0205, + "r2-theta-1.0": 0.0412, + "ws-theta-0.0": "NA", + "ws-theta-0.5": "NA", + "ws-theta-1.0": "NA" + }, + "scaled_scores": { + "r1_all": 0.5337, + "r1_grn": 0.3208, + "r2-theta-0.0": 0, + "r2-theta-0.5": 0, + "r2-theta-1.0": 0, + "ws-theta-0.0": 0, + "ws-theta-0.5": 0, + "ws-theta-1.0": 0 + }, + "mean_score": 0.1068, + "resources": {} + }, + { + "dataset_id": "nakatake", + "method_id": "pearson_corr", + "metric_values": { + "r1_all": -0.0017, + "r1_grn": -0.005, + "r2-theta-0.0": 0.0322, + "r2-theta-0.5": 0.1258, + "r2-theta-1.0": 0.0842, + "ws-theta-0.0": "NA", + "ws-theta-0.5": "NA", + "ws-theta-1.0": "NA" + }, + "scaled_scores": { + "r1_all": 0, + "r1_grn": 0, + "r2-theta-0.0": 0.6997, + "r2-theta-0.5": 0.8852, + "r2-theta-1.0": 0.76, + "ws-theta-0.0": 0, + "ws-theta-0.5": 0, + "ws-theta-1.0": 0 + }, + "mean_score": 0.2931, + "resources": {} + }, + { + "dataset_id": "nakatake", + "method_id": "portia", + "metric_values": { + "r1_all": -0.0035, + "r1_grn": -0.0121, + "r2-theta-0.0": 0.0123, + "r2-theta-0.5": 0.0332, + "r2-theta-1.0": 0.0444, + "ws-theta-0.0": "NA", + "ws-theta-0.5": "NA", + "ws-theta-1.0": "NA" + }, + "scaled_scores": { + "r1_all": -0.2998, + "r1_grn": -0.33, + "r2-theta-0.0": 0.2225, + "r2-theta-0.5": 0.1067, + "r2-theta-1.0": 0.0557, + "ws-theta-0.0": 0, + "ws-theta-0.5": 0, + "ws-theta-1.0": 0 + }, + "mean_score": 0.0481, + "resources": {} + }, + { + "dataset_id": "nakatake", + "method_id": "positive_control", + "metric_values": { + "r1_all": 0.0046, + "r1_grn": 0.0165, + "r2-theta-0.0": 0.0447, + "r2-theta-0.5": 0.1395, + "r2-theta-1.0": 0.0978, + "ws-theta-0.0": "NA", + "ws-theta-0.5": "NA", + "ws-theta-1.0": "NA" + }, + "scaled_scores": { + "r1_all": 1, + "r1_grn": 1, + "r2-theta-0.0": 1, + "r2-theta-0.5": 1, + "r2-theta-1.0": 1, + "ws-theta-0.0": 0, + "ws-theta-0.5": 0, + "ws-theta-1.0": 0 + }, + "mean_score": 0.625, + "resources": {} + }, + { + "dataset_id": "nakatake", + "method_id": "ppcor", + "metric_values": { + "r1_all": "NA", + "r1_grn": "NA", + 
"r2-theta-0.0": "NA", + "r2-theta-0.5": "NA", + "r2-theta-1.0": "NA", + "ws-theta-0.0": "NA", + "ws-theta-0.5": "NA", + "ws-theta-1.0": "NA" + }, + "scaled_scores": { + "r1_all": 0, + "r1_grn": 0, + "r2-theta-0.0": 0, + "r2-theta-0.5": 0, + "r2-theta-1.0": 0, + "ws-theta-0.0": 0, + "ws-theta-0.5": 0, + "ws-theta-1.0": 0 + }, + "mean_score": 0, + "resources": {} + }, + { + "dataset_id": "nakatake", + "method_id": "scenic", + "metric_values": { + "r1_all": -0.0013, + "r1_grn": -0.0023, + "r2-theta-0.0": 0.0105, + "r2-theta-0.5": 0.0631, + "r2-theta-1.0": 0.0754, + "ws-theta-0.0": "NA", + "ws-theta-0.5": "NA", + "ws-theta-1.0": "NA" + }, + "scaled_scores": { + "r1_all": 0.0623, + "r1_grn": 0.1226, + "r2-theta-0.0": 0.179, + "r2-theta-0.5": 0.3577, + "r2-theta-1.0": 0.6039, + "ws-theta-0.0": 0, + "ws-theta-0.5": 0, + "ws-theta-1.0": 0 + }, + "mean_score": 0.1657, + "resources": {} + }, + { + "dataset_id": "nakatake", + "method_id": "scenicplus", + "metric_values": { + "r1_all": "NA", + "r1_grn": "NA", + "r2-theta-0.0": "NA", + "r2-theta-0.5": "NA", + "r2-theta-1.0": "NA", + "ws-theta-0.0": "NA", + "ws-theta-0.5": "NA", + "ws-theta-1.0": "NA" + }, + "scaled_scores": { + "r1_all": 0, + "r1_grn": 0, + "r2-theta-0.0": 0, + "r2-theta-0.5": 0, + "r2-theta-1.0": 0, + "ws-theta-0.0": 0, + "ws-theta-0.5": 0, + "ws-theta-1.0": 0 + }, + "mean_score": 0, + "resources": {} + }, + { + "dataset_id": "nakatake", + "method_id": "scglue", + "metric_values": { + "r1_all": "NA", + "r1_grn": "NA", + "r2-theta-0.0": "NA", + "r2-theta-0.5": "NA", + "r2-theta-1.0": "NA", + "ws-theta-0.0": "NA", + "ws-theta-0.5": "NA", + "ws-theta-1.0": "NA" + }, + "scaled_scores": { + "r1_all": 0, + "r1_grn": 0, + "r2-theta-0.0": 0, + "r2-theta-0.5": 0, + "r2-theta-1.0": 0, + "ws-theta-0.0": 0, + "ws-theta-0.5": 0, + "ws-theta-1.0": 0 + }, + "mean_score": 0, + "resources": {} + }, + { + "dataset_id": "nakatake", + "method_id": "scprint", + "metric_values": { + "r1_all": "NA", + "r1_grn": "NA", + "r2-theta-0.0": "NA", + "r2-theta-0.5": "NA", + "r2-theta-1.0": "NA", + "ws-theta-0.0": "NA", + "ws-theta-0.5": "NA", + "ws-theta-1.0": "NA" + }, + "scaled_scores": { + "r1_all": 0, + "r1_grn": 0, + "r2-theta-0.0": 0, + "r2-theta-0.5": 0, + "r2-theta-1.0": 0, + "ws-theta-0.0": 0, + "ws-theta-0.5": 0, + "ws-theta-1.0": 0 + }, + "mean_score": 0, + "resources": {} + }, + { + "dataset_id": "norman", + "method_id": "celloracle", + "metric_values": { + "r1_all": "NA", + "r1_grn": "NA", + "r2-theta-0.0": "NA", + "r2-theta-0.5": "NA", + "r2-theta-1.0": "NA", + "ws-theta-0.0": "NA", + "ws-theta-0.5": "NA", + "ws-theta-1.0": "NA" + }, + "scaled_scores": { + "r1_all": 0, + "r1_grn": 0, + "r2-theta-0.0": 0, + "r2-theta-0.5": 0, + "r2-theta-1.0": 0, + "ws-theta-0.0": 0, + "ws-theta-0.5": 0, + "ws-theta-1.0": 0 + }, + "mean_score": 0, + "resources": {} + }, + { + "dataset_id": "norman", + "method_id": "figr", + "metric_values": { + "r1_all": "NA", + "r1_grn": "NA", + "r2-theta-0.0": "NA", + "r2-theta-0.5": "NA", + "r2-theta-1.0": "NA", + "ws-theta-0.0": "NA", + "ws-theta-0.5": "NA", + "ws-theta-1.0": "NA" + }, + "scaled_scores": { + "r1_all": 0, + "r1_grn": 0, + "r2-theta-0.0": 0, + "r2-theta-0.5": 0, + "r2-theta-1.0": 0, + "ws-theta-0.0": 0, + "ws-theta-0.5": 0, + "ws-theta-1.0": 0 + }, + "mean_score": 0, + "resources": {} + }, + { + "dataset_id": "norman", + "method_id": "granie", + "metric_values": { + "r1_all": "NA", + "r1_grn": "NA", + "r2-theta-0.0": "NA", + "r2-theta-0.5": "NA", + "r2-theta-1.0": "NA", + "ws-theta-0.0": "NA", + "ws-theta-0.5": "NA", 
+ "ws-theta-1.0": "NA" + }, + "scaled_scores": { + "r1_all": 0, + "r1_grn": 0, + "r2-theta-0.0": 0, + "r2-theta-0.5": 0, + "r2-theta-1.0": 0, + "ws-theta-0.0": 0, + "ws-theta-0.5": 0, + "ws-theta-1.0": 0 + }, + "mean_score": 0, + "resources": {} + }, + { + "dataset_id": "norman", + "method_id": "grnboost", + "metric_values": { + "r1_all": -0.0111, + "r1_grn": -0.0137, + "r2-theta-0.0": 0.488, + "r2-theta-0.5": 0.3533, + "r2-theta-1.0": 0.2802, + "ws-theta-0.0": 0.8131, + "ws-theta-0.5": 0.7651, + "ws-theta-1.0": 0.6562 + }, + "scaled_scores": { + "r1_all": 0.0882, + "r1_grn": 0.0779, + "r2-theta-0.0": 0.7318, + "r2-theta-0.5": 1.0746, + "r2-theta-1.0": 1.0066, + "ws-theta-0.0": 0.9053, + "ws-theta-0.5": 1.2617, + "ws-theta-1.0": 1.1635 + }, + "mean_score": 0.7254, + "resources": {} + }, + { + "dataset_id": "norman", + "method_id": "negative_control", + "metric_values": { + "r1_all": 0.0105, + "r1_grn": 0.0105, + "r2-theta-0.0": 0.2569, + "r2-theta-0.5": 0.2804, + "r2-theta-1.0": 0.2312, + "ws-theta-0.0": 0.4902, + "ws-theta-0.5": 0.4968, + "ws-theta-1.0": 0.4874 + }, + "scaled_scores": { + "r1_all": 1, + "r1_grn": 1, + "r2-theta-0.0": 0, + "r2-theta-0.5": 0, + "r2-theta-1.0": 0, + "ws-theta-0.0": 0, + "ws-theta-0.5": 0, + "ws-theta-1.0": 0 + }, + "mean_score": 0.25, + "resources": {} + }, + { + "dataset_id": "norman", + "method_id": "pearson_corr", + "metric_values": { + "r1_all": -0.0131, + "r1_grn": -0.0157, + "r2-theta-0.0": 0.5126, + "r2-theta-0.5": 0.3483, + "r2-theta-1.0": 0.271, + "ws-theta-0.0": 0.7827, + "ws-theta-0.5": 0.6915, + "ws-theta-1.0": 0.6052 + }, + "scaled_scores": { + "r1_all": 0, + "r1_grn": 0, + "r2-theta-0.0": 0.8097, + "r2-theta-0.5": 1, + "r2-theta-1.0": 0.818, + "ws-theta-0.0": 0.8202, + "ws-theta-0.5": 0.9154, + "ws-theta-1.0": 0.8122 + }, + "mean_score": 0.6469, + "resources": {} + }, + { + "dataset_id": "norman", + "method_id": "portia", + "metric_values": { + "r1_all": -0.0106, + "r1_grn": -0.0107, + "r2-theta-0.0": 0.1634, + "r2-theta-0.5": 0.2241, + "r2-theta-1.0": 0.231, + "ws-theta-0.0": 0.4866, + "ws-theta-0.5": 0.4975, + "ws-theta-1.0": 0.512 + }, + "scaled_scores": { + "r1_all": 0.106, + "r1_grn": 0.1893, + "r2-theta-0.0": -0.296, + "r2-theta-0.5": -0.8293, + "r2-theta-1.0": -0.0045, + "ws-theta-0.0": -0.0103, + "ws-theta-0.5": 0.0032, + "ws-theta-1.0": 0.1696 + }, + "mean_score": 0.0585, + "resources": {} + }, + { + "dataset_id": "norman", + "method_id": "positive_control", + "metric_values": { + "r1_all": 0.0016, + "r1_grn": 0.0047, + "r2-theta-0.0": 0.5727, + "r2-theta-0.5": 0.3417, + "r2-theta-1.0": 0.2799, + "ws-theta-0.0": 0.8468, + "ws-theta-0.5": 0.7095, + "ws-theta-1.0": 0.6324 + }, + "scaled_scores": { + "r1_all": 0.6243, + "r1_grn": 0.7785, + "r2-theta-0.0": 1, + "r2-theta-0.5": 0.9032, + "r2-theta-1.0": 1, + "ws-theta-0.0": 1, + "ws-theta-0.5": 1, + "ws-theta-1.0": 1 + }, + "mean_score": 0.9132, + "resources": {} + }, + { + "dataset_id": "norman", + "method_id": "ppcor", + "metric_values": { + "r1_all": -0.0117, + "r1_grn": -0.0118, + "r2-theta-0.0": 0.3451, + "r2-theta-0.5": 0.2983, + "r2-theta-1.0": 0.2427, + "ws-theta-0.0": 0.5508, + "ws-theta-0.5": 0.5487, + "ws-theta-1.0": 0.5279 + }, + "scaled_scores": { + "r1_all": 0.0617, + "r1_grn": 0.1489, + "r2-theta-0.0": 0.2794, + "r2-theta-0.5": 0.2643, + "r2-theta-1.0": 0.2363, + "ws-theta-0.0": 0.1698, + "ws-theta-0.5": 0.2438, + "ws-theta-1.0": 0.2795 + }, + "mean_score": 0.2105, + "resources": {} + }, + { + "dataset_id": "norman", + "method_id": "scenic", + "metric_values": { + "r1_all": 
0.0041, + "r1_grn": 0.0164, + "r2-theta-0.0": 0.3918, + "r2-theta-0.5": 0.2934, + "r2-theta-1.0": 0.2317, + "ws-theta-0.0": 0.6859, + "ws-theta-0.5": 0.544, + "ws-theta-1.0": 0.5173 + }, + "scaled_scores": { + "r1_all": 0.7316, + "r1_grn": 1.2275, + "r2-theta-0.0": 0.427, + "r2-theta-0.5": 0.1923, + "r2-theta-1.0": 0.0098, + "ws-theta-0.0": 0.5488, + "ws-theta-0.5": 0.222, + "ws-theta-1.0": 0.206 + }, + "mean_score": 0.4172, + "resources": {} + }, + { + "dataset_id": "norman", + "method_id": "scenicplus", + "metric_values": { + "r1_all": "NA", + "r1_grn": "NA", + "r2-theta-0.0": "NA", + "r2-theta-0.5": "NA", + "r2-theta-1.0": "NA", + "ws-theta-0.0": "NA", + "ws-theta-0.5": "NA", + "ws-theta-1.0": "NA" + }, + "scaled_scores": { + "r1_all": 0, + "r1_grn": 0, + "r2-theta-0.0": 0, + "r2-theta-0.5": 0, + "r2-theta-1.0": 0, + "ws-theta-0.0": 0, + "ws-theta-0.5": 0, + "ws-theta-1.0": 0 + }, + "mean_score": 0, + "resources": {} + }, + { + "dataset_id": "norman", + "method_id": "scglue", + "metric_values": { + "r1_all": "NA", + "r1_grn": "NA", + "r2-theta-0.0": "NA", + "r2-theta-0.5": "NA", + "r2-theta-1.0": "NA", + "ws-theta-0.0": "NA", + "ws-theta-0.5": "NA", + "ws-theta-1.0": "NA" + }, + "scaled_scores": { + "r1_all": 0, + "r1_grn": 0, + "r2-theta-0.0": 0, + "r2-theta-0.5": 0, + "r2-theta-1.0": 0, + "ws-theta-0.0": 0, + "ws-theta-0.5": 0, + "ws-theta-1.0": 0 + }, + "mean_score": 0, + "resources": {} + }, + { + "dataset_id": "norman", + "method_id": "scprint", + "metric_values": { + "r1_all": 0.0073, + "r1_grn": 0.0093, + "r2-theta-0.0": 0.5352, + "r2-theta-0.5": 0.3302, + "r2-theta-1.0": 0.2548, + "ws-theta-0.0": 0.7088, + "ws-theta-0.5": 0.6859, + "ws-theta-1.0": 0.6247 + }, + "scaled_scores": { + "r1_all": 0.8641, + "r1_grn": 0.9565, + "r2-theta-0.0": 0.8811, + "r2-theta-0.5": 0.7338, + "r2-theta-1.0": 0.4843, + "ws-theta-0.0": 0.6129, + "ws-theta-0.5": 0.8893, + "ws-theta-1.0": 0.9468 + }, + "mean_score": 0.7961, + "resources": {} + }, + { + "dataset_id": "op", + "method_id": "celloracle", + "metric_values": { + "r1_all": 0.4753, + "r1_grn": 0.7772, + "r2-theta-0.0": 0.4939, + "r2-theta-0.5": 0.4192, + "r2-theta-1.0": 0.3576, + "ws-theta-0.0": "NA", + "ws-theta-0.5": "NA", + "ws-theta-1.0": "NA" + }, + "scaled_scores": { + "r1_all": 0.7409, + "r1_grn": 0.623, + "r2-theta-0.0": 0.9779, + "r2-theta-0.5": 0.9749, + "r2-theta-1.0": 0.7524, + "ws-theta-0.0": 0, + "ws-theta-0.5": 0, + "ws-theta-1.0": 0 + }, + "mean_score": 0.5086, + "resources": {} + }, + { + "dataset_id": "op", + "method_id": "figr", + "metric_values": { + "r1_all": 0.0529, + "r1_grn": 0.5718, + "r2-theta-0.0": 0.3707, + "r2-theta-0.5": 0.3742, + "r2-theta-1.0": 0.3587, + "ws-theta-0.0": "NA", + "ws-theta-0.5": "NA", + "ws-theta-1.0": "NA" + }, + "scaled_scores": { + "r1_all": 0.08, + "r1_grn": 0.4579, + "r2-theta-0.0": 0.5974, + "r2-theta-0.5": 0.6611, + "r2-theta-1.0": 0.7975, + "ws-theta-0.0": 0, + "ws-theta-0.5": 0, + "ws-theta-1.0": 0 + }, + "mean_score": 0.3242, + "resources": {} + }, + { + "dataset_id": "op", + "method_id": "granie", + "metric_values": { + "r1_all": 0.0969, + "r1_grn": 0.1307, + "r2-theta-0.0": 0.1413, + "r2-theta-0.5": 0.2433, + "r2-theta-1.0": 0.3313, + "ws-theta-0.0": "NA", + "ws-theta-0.5": "NA", + "ws-theta-1.0": "NA" + }, + "scaled_scores": { + "r1_all": 0.1488, + "r1_grn": 0.1035, + "r2-theta-0.0": -0.1114, + "r2-theta-0.5": -0.2512, + "r2-theta-1.0": -0.3049, + "ws-theta-0.0": 0, + "ws-theta-0.5": 0, + "ws-theta-1.0": 0 + }, + "mean_score": 0.0315, + "resources": {} + }, + { + "dataset_id": "op", + 
"method_id": "grnboost", + "metric_values": { + "r1_all": 0.6453, + "r1_grn": 0.8637, + "r2-theta-0.0": 0.5394, + "r2-theta-0.5": 0.4849, + "r2-theta-1.0": 0.3731, + "ws-theta-0.0": "NA", + "ws-theta-0.5": "NA", + "ws-theta-1.0": "NA" + }, + "scaled_scores": { + "r1_all": 1.007, + "r1_grn": 0.6926, + "r2-theta-0.0": 1.1185, + "r2-theta-0.5": 1.4324, + "r2-theta-1.0": 1.3762, + "ws-theta-0.0": 0, + "ws-theta-0.5": 0, + "ws-theta-1.0": 0 + }, + "mean_score": 0.5866, + "resources": {} + }, + { + "dataset_id": "op", + "method_id": "negative_control", + "metric_values": { + "r1_all": 0.0019, + "r1_grn": 0.0019, + "r2-theta-0.0": 0.1774, + "r2-theta-0.5": 0.2794, + "r2-theta-1.0": 0.3389, + "ws-theta-0.0": "NA", + "ws-theta-0.5": "NA", + "ws-theta-1.0": "NA" + }, + "scaled_scores": { + "r1_all": 0, + "r1_grn": 0, + "r2-theta-0.0": 0, + "r2-theta-0.5": 0, + "r2-theta-1.0": 0, + "ws-theta-0.0": 0, + "ws-theta-0.5": 0, + "ws-theta-1.0": 0 + }, + "mean_score": 0, + "resources": {} + }, + { + "dataset_id": "op", + "method_id": "pearson_corr", + "metric_values": { + "r1_all": 0.5703, + "r1_grn": 1.0482, + "r2-theta-0.0": 0.501, + "r2-theta-0.5": 0.4044, + "r2-theta-1.0": 0.3515, + "ws-theta-0.0": "NA", + "ws-theta-0.5": "NA", + "ws-theta-1.0": "NA" + }, + "scaled_scores": { + "r1_all": 0.8896, + "r1_grn": 0.8408, + "r2-theta-0.0": 1, + "r2-theta-0.5": 0.8714, + "r2-theta-1.0": 0.5054, + "ws-theta-0.0": 0, + "ws-theta-0.5": 0, + "ws-theta-1.0": 0 + }, + "mean_score": 0.5134, + "resources": {} + }, + { + "dataset_id": "op", + "method_id": "portia", + "metric_values": { + "r1_all": 0.4657, + "r1_grn": 0.6743, + "r2-theta-0.0": 0.44, + "r2-theta-0.5": 0.3426, + "r2-theta-1.0": 0.3453, + "ws-theta-0.0": "NA", + "ws-theta-0.5": "NA", + "ws-theta-1.0": "NA" + }, + "scaled_scores": { + "r1_all": 0.7259, + "r1_grn": 0.5403, + "r2-theta-0.0": 0.8114, + "r2-theta-0.5": 0.441, + "r2-theta-1.0": 0.2592, + "ws-theta-0.0": 0, + "ws-theta-0.5": 0, + "ws-theta-1.0": 0 + }, + "mean_score": 0.3472, + "resources": {} + }, + { + "dataset_id": "op", + "method_id": "positive_control", + "metric_values": { + "r1_all": 0.6408, + "r1_grn": 1.2462, + "r2-theta-0.0": 0.4854, + "r2-theta-0.5": 0.4228, + "r2-theta-1.0": 0.3638, + "ws-theta-0.0": "NA", + "ws-theta-0.5": "NA", + "ws-theta-1.0": "NA" + }, + "scaled_scores": { + "r1_all": 1, + "r1_grn": 1, + "r2-theta-0.0": 0.9515, + "r2-theta-0.5": 1, + "r2-theta-1.0": 1, + "ws-theta-0.0": 0, + "ws-theta-0.5": 0, + "ws-theta-1.0": 0 + }, + "mean_score": 0.6189, + "resources": {} + }, + { + "dataset_id": "op", + "method_id": "ppcor", + "metric_values": { + "r1_all": 0.1967, + "r1_grn": 0.2385, + "r2-theta-0.0": 0.3101, + "r2-theta-0.5": 0.3098, + "r2-theta-1.0": 0.3411, + "ws-theta-0.0": "NA", + "ws-theta-0.5": "NA", + "ws-theta-1.0": "NA" + }, + "scaled_scores": { + "r1_all": 0.3049, + "r1_grn": 0.1901, + "r2-theta-0.0": 0.4102, + "r2-theta-0.5": 0.2119, + "r2-theta-1.0": 0.0868, + "ws-theta-0.0": 0, + "ws-theta-0.5": 0, + "ws-theta-1.0": 0 + }, + "mean_score": 0.1505, + "resources": {} + }, + { + "dataset_id": "op", + "method_id": "scenic", + "metric_values": { + "r1_all": 0.2395, + "r1_grn": 0.4074, + "r2-theta-0.0": 0.4074, + "r2-theta-0.5": 0.3697, + "r2-theta-1.0": 0.3555, + "ws-theta-0.0": "NA", + "ws-theta-0.5": "NA", + "ws-theta-1.0": "NA" + }, + "scaled_scores": { + "r1_all": 0.372, + "r1_grn": 0.3259, + "r2-theta-0.0": 0.7108, + "r2-theta-0.5": 0.6294, + "r2-theta-1.0": 0.6673, + "ws-theta-0.0": 0, + "ws-theta-0.5": 0, + "ws-theta-1.0": 0 + }, + "mean_score": 0.3382, + 
"resources": {} + }, + { + "dataset_id": "op", + "method_id": "scenicplus", + "metric_values": { + "r1_all": 0.5082, + "r1_grn": 0.7513, + "r2-theta-0.0": 0.4854, + "r2-theta-0.5": 0.4367, + "r2-theta-1.0": 0.3658, + "ws-theta-0.0": "NA", + "ws-theta-0.5": "NA", + "ws-theta-1.0": "NA" + }, + "scaled_scores": { + "r1_all": 0.7924, + "r1_grn": 0.6022, + "r2-theta-0.0": 0.9516, + "r2-theta-0.5": 1.0967, + "r2-theta-1.0": 1.0831, + "ws-theta-0.0": 0, + "ws-theta-0.5": 0, + "ws-theta-1.0": 0 + }, + "mean_score": 0.5433, + "resources": {} + }, + { + "dataset_id": "op", + "method_id": "scglue", + "metric_values": { + "r1_all": 0.1216, + "r1_grn": 0.5319, + "r2-theta-0.0": 0.2863, + "r2-theta-0.5": 0.3023, + "r2-theta-1.0": 0.3405, + "ws-theta-0.0": "NA", + "ws-theta-0.5": "NA", + "ws-theta-1.0": "NA" + }, + "scaled_scores": { + "r1_all": 0.1874, + "r1_grn": 0.4259, + "r2-theta-0.0": 0.3365, + "r2-theta-0.5": 0.1601, + "r2-theta-1.0": 0.0631, + "ws-theta-0.0": 0, + "ws-theta-0.5": 0, + "ws-theta-1.0": 0 + }, + "mean_score": 0.1466, + "resources": {} + }, + { + "dataset_id": "op", + "method_id": "scprint", + "metric_values": { + "r1_all": 0.4163, + "r1_grn": 0.5612, + "r2-theta-0.0": 0.1803, + "r2-theta-0.5": 0.2889, + "r2-theta-1.0": 0.3428, + "ws-theta-0.0": "NA", + "ws-theta-0.5": "NA", + "ws-theta-1.0": "NA" + }, + "scaled_scores": { + "r1_all": 0.6486, + "r1_grn": 0.4495, + "r2-theta-0.0": 0.0092, + "r2-theta-0.5": 0.0664, + "r2-theta-1.0": 0.1571, + "ws-theta-0.0": 0, + "ws-theta-0.5": 0, + "ws-theta-1.0": 0 + }, + "mean_score": 0.1663, + "resources": {} + }, + { + "dataset_id": "replogle", + "method_id": "celloracle", + "metric_values": { + "r1_all": "NA", + "r1_grn": "NA", + "r2-theta-0.0": "NA", + "r2-theta-0.5": "NA", + "r2-theta-1.0": "NA", + "ws-theta-0.0": "NA", + "ws-theta-0.5": "NA", + "ws-theta-1.0": "NA" + }, + "scaled_scores": { + "r1_all": 0, + "r1_grn": 0, + "r2-theta-0.0": 0, + "r2-theta-0.5": 0, + "r2-theta-1.0": 0, + "ws-theta-0.0": 0, + "ws-theta-0.5": 0, + "ws-theta-1.0": 0 + }, + "mean_score": 0, + "resources": {} + }, + { + "dataset_id": "replogle", + "method_id": "figr", + "metric_values": { + "r1_all": "NA", + "r1_grn": "NA", + "r2-theta-0.0": "NA", + "r2-theta-0.5": "NA", + "r2-theta-1.0": "NA", + "ws-theta-0.0": "NA", + "ws-theta-0.5": "NA", + "ws-theta-1.0": "NA" + }, + "scaled_scores": { + "r1_all": 0, + "r1_grn": 0, + "r2-theta-0.0": 0, + "r2-theta-0.5": 0, + "r2-theta-1.0": 0, + "ws-theta-0.0": 0, + "ws-theta-0.5": 0, + "ws-theta-1.0": 0 + }, + "mean_score": 0, + "resources": {} + }, + { + "dataset_id": "replogle", + "method_id": "granie", + "metric_values": { + "r1_all": "NA", + "r1_grn": "NA", + "r2-theta-0.0": "NA", + "r2-theta-0.5": "NA", + "r2-theta-1.0": "NA", + "ws-theta-0.0": "NA", + "ws-theta-0.5": "NA", + "ws-theta-1.0": "NA" + }, + "scaled_scores": { + "r1_all": 0, + "r1_grn": 0, + "r2-theta-0.0": 0, + "r2-theta-0.5": 0, + "r2-theta-1.0": 0, + "ws-theta-0.0": 0, + "ws-theta-0.5": 0, + "ws-theta-1.0": 0 + }, + "mean_score": 0, + "resources": {} + }, + { + "dataset_id": "replogle", + "method_id": "grnboost", + "metric_values": { + "r1_all": 0.0035, + "r1_grn": 0.0051, + "r2-theta-0.0": 0.0963, + "r2-theta-0.5": 0.0646, + "r2-theta-1.0": 0.0591, + "ws-theta-0.0": 0.662, + "ws-theta-0.5": 0.5992, + "ws-theta-1.0": 0.5404 + }, + "scaled_scores": { + "r1_all": 1.2197, + "r1_grn": 1.2368, + "r2-theta-0.0": 1.3291, + "r2-theta-0.5": 1.5093, + "r2-theta-1.0": 1.6618, + "ws-theta-0.0": 0.8745, + "ws-theta-0.5": 0.8684, + "ws-theta-1.0": 0.7173 + }, + 
"mean_score": 0.9325, + "resources": {} + }, + { + "dataset_id": "replogle", + "method_id": "negative_control", + "metric_values": { + "r1_all": -0.014, + "r1_grn": -0.014, + "r2-theta-0.0": 0.0071, + "r2-theta-0.5": 0.0088, + "r2-theta-1.0": 0.0179, + "ws-theta-0.0": 0.4883, + "ws-theta-0.5": 0.5023, + "ws-theta-1.0": 0.4956 + }, + "scaled_scores": { + "r1_all": 0, + "r1_grn": 0.1725, + "r2-theta-0.0": 0, + "r2-theta-0.5": 0, + "r2-theta-1.0": 0, + "ws-theta-0.0": 0, + "ws-theta-0.5": 0, + "ws-theta-1.0": 0 + }, + "mean_score": 0.0216, + "resources": {} + }, + { + "dataset_id": "replogle", + "method_id": "pearson_corr", + "metric_values": { + "r1_all": 0.0004, + "r1_grn": 0.0009, + "r2-theta-0.0": 0.071, + "r2-theta-0.5": 0.0436, + "r2-theta-1.0": 0.041, + "ws-theta-0.0": 0.6869, + "ws-theta-0.5": 0.6139, + "ws-theta-1.0": 0.558 + }, + "scaled_scores": { + "r1_all": 1, + "r1_grn": 1, + "r2-theta-0.0": 0.9525, + "r2-theta-0.5": 0.9413, + "r2-theta-1.0": 0.9313, + "ws-theta-0.0": 1, + "ws-theta-0.5": 1, + "ws-theta-1.0": 1 + }, + "mean_score": 0.9781, + "resources": {} + }, + { + "dataset_id": "replogle", + "method_id": "portia", + "metric_values": { + "r1_all": -0.0105, + "r1_grn": -0.0106, + "r2-theta-0.0": 0.0675, + "r2-theta-0.5": 0.0419, + "r2-theta-1.0": 0.0403, + "ws-theta-0.0": 0.5472, + "ws-theta-0.5": 0.511, + "ws-theta-1.0": 0.5035 + }, + "scaled_scores": { + "r1_all": 0.2394, + "r1_grn": 0.3641, + "r2-theta-0.0": 0.8997, + "r2-theta-0.5": 0.8938, + "r2-theta-1.0": 0.9038, + "ws-theta-0.0": 0.2968, + "ws-theta-0.5": 0.078, + "ws-theta-1.0": 0.1266 + }, + "mean_score": 0.4753, + "resources": {} + }, + { + "dataset_id": "replogle", + "method_id": "positive_control", + "metric_values": { + "r1_all": -0.0073, + "r1_grn": -0.0171, + "r2-theta-0.0": 0.0742, + "r2-theta-0.5": 0.0458, + "r2-theta-1.0": 0.0427, + "ws-theta-0.0": 0.6845, + "ws-theta-0.5": 0.6095, + "ws-theta-1.0": 0.5564 + }, + "scaled_scores": { + "r1_all": 0.4672, + "r1_grn": 0, + "r2-theta-0.0": 1, + "r2-theta-0.5": 1, + "r2-theta-1.0": 1, + "ws-theta-0.0": 0.9877, + "ws-theta-0.5": 0.9606, + "ws-theta-1.0": 0.9739 + }, + "mean_score": 0.7987, + "resources": {} + }, + { + "dataset_id": "replogle", + "method_id": "ppcor", + "metric_values": { + "r1_all": -0.0116, + "r1_grn": -0.0123, + "r2-theta-0.0": 0.0068, + "r2-theta-0.5": 0.009, + "r2-theta-1.0": 0.018, + "ws-theta-0.0": 0.5115, + "ws-theta-0.5": 0.503, + "ws-theta-1.0": 0.503 + }, + "scaled_scores": { + "r1_all": 0.1658, + "r1_grn": 0.2645, + "r2-theta-0.0": -0.0045, + "r2-theta-0.5": 0.0057, + "r2-theta-1.0": 0.0066, + "ws-theta-0.0": 0.1172, + "ws-theta-0.5": 0.0065, + "ws-theta-1.0": 0.1177 + }, + "mean_score": 0.0855, + "resources": {} + }, + { + "dataset_id": "replogle", + "method_id": "scenic", + "metric_values": { + "r1_all": 0.0064, + "r1_grn": 0.0114, + "r2-theta-0.0": 0.0268, + "r2-theta-0.5": 0.0168, + "r2-theta-1.0": 0.0222, + "ws-theta-0.0": 0.5707, + "ws-theta-0.5": 0.5371, + "ws-theta-1.0": 0.5131 + }, + "scaled_scores": { + "r1_all": 1.4205, + "r1_grn": 1.5896, + "r2-theta-0.0": 0.2931, + "r2-theta-0.5": 0.2171, + "r2-theta-1.0": 0.1727, + "ws-theta-0.0": 0.4149, + "ws-theta-0.5": 0.3122, + "ws-theta-1.0": 0.2806 + }, + "mean_score": 0.4613, + "resources": {} + }, + { + "dataset_id": "replogle", + "method_id": "scenicplus", + "metric_values": { + "r1_all": "NA", + "r1_grn": "NA", + "r2-theta-0.0": "NA", + "r2-theta-0.5": "NA", + "r2-theta-1.0": "NA", + "ws-theta-0.0": "NA", + "ws-theta-0.5": "NA", + "ws-theta-1.0": "NA" + }, + "scaled_scores": { + 
"r1_all": 0, + "r1_grn": 0, + "r2-theta-0.0": 0, + "r2-theta-0.5": 0, + "r2-theta-1.0": 0, + "ws-theta-0.0": 0, + "ws-theta-0.5": 0, + "ws-theta-1.0": 0 + }, + "mean_score": 0, + "resources": {} + }, + { + "dataset_id": "replogle", + "method_id": "scglue", + "metric_values": { + "r1_all": "NA", + "r1_grn": "NA", + "r2-theta-0.0": "NA", + "r2-theta-0.5": "NA", + "r2-theta-1.0": "NA", + "ws-theta-0.0": "NA", + "ws-theta-0.5": "NA", + "ws-theta-1.0": "NA" + }, + "scaled_scores": { + "r1_all": 0, + "r1_grn": 0, + "r2-theta-0.0": 0, + "r2-theta-0.5": 0, + "r2-theta-1.0": 0, + "ws-theta-0.0": 0, + "ws-theta-0.5": 0, + "ws-theta-1.0": 0 + }, + "mean_score": 0, + "resources": {} + }, + { + "dataset_id": "replogle", + "method_id": "scprint", + "metric_values": { + "r1_all": "NA", + "r1_grn": "NA", + "r2-theta-0.0": "NA", + "r2-theta-0.5": "NA", + "r2-theta-1.0": "NA", + "ws-theta-0.0": "NA", + "ws-theta-0.5": "NA", + "ws-theta-1.0": "NA" + }, + "scaled_scores": { + "r1_all": 0, + "r1_grn": 0, + "r2-theta-0.0": 0, + "r2-theta-0.5": 0, + "r2-theta-1.0": 0, + "ws-theta-0.0": 0, + "ws-theta-0.5": 0, + "ws-theta-1.0": 0 + }, + "mean_score": 0, + "resources": {} + }, + { + "dataset_id": null, + "method_id": "celloracle", + "metric_values": { + "r1_all": "NA", + "r1_grn": "NA", + "r2-theta-0.0": "NA", + "r2-theta-0.5": "NA", + "r2-theta-1.0": "NA", + "ws-theta-0.0": "NA", + "ws-theta-0.5": "NA", + "ws-theta-1.0": "NA" + }, + "scaled_scores": { + "r1_all": 0, + "r1_grn": 0, + "r2-theta-0.0": 0, + "r2-theta-0.5": 0, + "r2-theta-1.0": 0, + "ws-theta-0.0": 0, + "ws-theta-0.5": 0, + "ws-theta-1.0": 0 + }, + "mean_score": 0, + "resources": { + "submit": "2025-06-27 11:04:33", + "exit_code": 0, + "duration_sec": 22581, + "cpu_pct": 1629.4531, + "peak_memory_mb": 79975, + "disk_read_mb": 32101, + "disk_write_mb": 257790 + } + }, + { + "dataset_id": null, + "method_id": "figr", + "metric_values": { + "r1_all": "NA", + "r1_grn": "NA", + "r2-theta-0.0": "NA", + "r2-theta-0.5": "NA", + "r2-theta-1.0": "NA", + "ws-theta-0.0": "NA", + "ws-theta-0.5": "NA", + "ws-theta-1.0": "NA" + }, + "scaled_scores": { + "r1_all": 0, + "r1_grn": 0, + "r2-theta-0.0": 0, + "r2-theta-0.5": 0, + "r2-theta-1.0": 0, + "ws-theta-0.0": 0, + "ws-theta-0.5": 0, + "ws-theta-1.0": 0 + }, + "mean_score": 0, + "resources": { + "submit": "2025-06-27 11:04:33", + "exit_code": 0, + "duration_sec": 46803, + "cpu_pct": 429.0028, + "peak_memory_mb": 271975, + "disk_read_mb": 16942, + "disk_write_mb": 9365 + } + }, + { + "dataset_id": null, + "method_id": "granie", + "metric_values": { + "r1_all": "NA", + "r1_grn": "NA", + "r2-theta-0.0": "NA", + "r2-theta-0.5": "NA", + "r2-theta-1.0": "NA", + "ws-theta-0.0": "NA", + "ws-theta-0.5": "NA", + "ws-theta-1.0": "NA" + }, + "scaled_scores": { + "r1_all": 0, + "r1_grn": 0, + "r2-theta-0.0": 0, + "r2-theta-0.5": 0, + "r2-theta-1.0": 0, + "ws-theta-0.0": 0, + "ws-theta-0.5": 0, + "ws-theta-1.0": 0 + }, + "mean_score": 0, + "resources": { + "submit": "2025-06-27 11:04:34", + "exit_code": 0, + "duration_sec": 18034, + "cpu_pct": 85.6473, + "peak_memory_mb": 182375, + "disk_read_mb": 18649, + "disk_write_mb": 21516 + } + }, + { + "dataset_id": null, + "method_id": "grnboost", + "metric_values": { + "r1_all": "NA", + "r1_grn": "NA", + "r2-theta-0.0": "NA", + "r2-theta-0.5": "NA", + "r2-theta-1.0": "NA", + "ws-theta-0.0": "NA", + "ws-theta-0.5": "NA", + "ws-theta-1.0": "NA" + }, + "scaled_scores": { + "r1_all": 0, + "r1_grn": 0, + "r2-theta-0.0": 0, + "r2-theta-0.5": 0, + "r2-theta-1.0": 0, + "ws-theta-0.0": 0, + 
"ws-theta-0.5": 0, + "ws-theta-1.0": 0 + }, + "mean_score": 0, + "resources": { + "submit": "2025-06-26 13:55:38", + "exit_code": 0, + "duration_sec": 99009, + "cpu_pct": 2127.4931, + "peak_memory_mb": "NA", + "disk_read_mb": 9459, + "disk_write_mb": 938 + } + }, + { + "dataset_id": null, + "method_id": "negative_control", + "metric_values": { + "r1_all": "NA", + "r1_grn": "NA", + "r2-theta-0.0": "NA", + "r2-theta-0.5": "NA", + "r2-theta-1.0": "NA", + "ws-theta-0.0": "NA", + "ws-theta-0.5": "NA", + "ws-theta-1.0": "NA" + }, + "scaled_scores": { + "r1_all": 0, + "r1_grn": 0, + "r2-theta-0.0": 0, + "r2-theta-0.5": 0, + "r2-theta-1.0": 0, + "ws-theta-0.0": 0, + "ws-theta-0.5": 0, + "ws-theta-1.0": 0 + }, + "mean_score": 0, + "resources": { + "submit": "2025-06-26 13:55:38", + "exit_code": 0, + "duration_sec": 23495.6, + "cpu_pct": 16.8137, + "peak_memory_mb": "NA", + "disk_read_mb": 2790, + "disk_write_mb": 80 + } + }, + { + "dataset_id": null, + "method_id": "pearson_corr", + "metric_values": { + "r1_all": "NA", + "r1_grn": "NA", + "r2-theta-0.0": "NA", + "r2-theta-0.5": "NA", + "r2-theta-1.0": "NA", + "ws-theta-0.0": "NA", + "ws-theta-0.5": "NA", + "ws-theta-1.0": "NA" + }, + "scaled_scores": { + "r1_all": 0, + "r1_grn": 0, + "r2-theta-0.0": 0, + "r2-theta-0.5": 0, + "r2-theta-1.0": 0, + "ws-theta-0.0": 0, + "ws-theta-0.5": 0, + "ws-theta-1.0": 0 + }, + "mean_score": 0, + "resources": { + "submit": "2025-06-26 13:55:38", + "exit_code": 0, + "duration_sec": 11919, + "cpu_pct": 67.0664, + "peak_memory_mb": "NA", + "disk_read_mb": 4060, + "disk_write_mb": 85 + } + }, + { + "dataset_id": null, + "method_id": "portia", + "metric_values": { + "r1_all": "NA", + "r1_grn": "NA", + "r2-theta-0.0": "NA", + "r2-theta-0.5": "NA", + "r2-theta-1.0": "NA", + "ws-theta-0.0": "NA", + "ws-theta-0.5": "NA", + "ws-theta-1.0": "NA" + }, + "scaled_scores": { + "r1_all": 0, + "r1_grn": 0, + "r2-theta-0.0": 0, + "r2-theta-0.5": 0, + "r2-theta-1.0": 0, + "ws-theta-0.0": 0, + "ws-theta-0.5": 0, + "ws-theta-1.0": 0 + }, + "mean_score": 0, + "resources": { + "submit": "2025-06-26 13:55:38", + "exit_code": 0, + "duration_sec": 30773, + "cpu_pct": 155.7726, + "peak_memory_mb": "NA", + "disk_read_mb": 9826, + "disk_write_mb": 85 + } + }, + { + "dataset_id": null, + "method_id": "positive_control", + "metric_values": { + "r1_all": "NA", + "r1_grn": "NA", + "r2-theta-0.0": "NA", + "r2-theta-0.5": "NA", + "r2-theta-1.0": "NA", + "ws-theta-0.0": "NA", + "ws-theta-0.5": "NA", + "ws-theta-1.0": "NA" + }, + "scaled_scores": { + "r1_all": 0, + "r1_grn": 0, + "r2-theta-0.0": 0, + "r2-theta-0.5": 0, + "r2-theta-1.0": 0, + "ws-theta-0.0": 0, + "ws-theta-0.5": 0, + "ws-theta-1.0": 0 + }, + "mean_score": 0, + "resources": { + "submit": "2025-06-26 13:55:38", + "exit_code": 0, + "duration_sec": 8808.9, + "cpu_pct": 59.7673, + "peak_memory_mb": "NA", + "disk_read_mb": 2973, + "disk_write_mb": 85 + } + }, + { + "dataset_id": null, + "method_id": "ppcor", + "metric_values": { + "r1_all": "NA", + "r1_grn": "NA", + "r2-theta-0.0": "NA", + "r2-theta-0.5": "NA", + "r2-theta-1.0": "NA", + "ws-theta-0.0": "NA", + "ws-theta-0.5": "NA", + "ws-theta-1.0": "NA" + }, + "scaled_scores": { + "r1_all": 0, + "r1_grn": 0, + "r2-theta-0.0": 0, + "r2-theta-0.5": 0, + "r2-theta-1.0": 0, + "ws-theta-0.0": 0, + "ws-theta-0.5": 0, + "ws-theta-1.0": 0 + }, + "mean_score": 0, + "resources": { + "submit": "2025-06-26 13:55:38", + "exit_code": 0, + "duration_sec": 95331, + "cpu_pct": 97.1339, + "peak_memory_mb": "NA", + "disk_read_mb": 3700, + "disk_write_mb": 40 
+ } + }, + { + "dataset_id": null, + "method_id": "scenic", + "metric_values": { + "r1_all": "NA", + "r1_grn": "NA", + "r2-theta-0.0": "NA", + "r2-theta-0.5": "NA", + "r2-theta-1.0": "NA", + "ws-theta-0.0": "NA", + "ws-theta-0.5": "NA", + "ws-theta-1.0": "NA" + }, + "scaled_scores": { + "r1_all": 0, + "r1_grn": 0, + "r2-theta-0.0": 0, + "r2-theta-0.5": 0, + "r2-theta-1.0": 0, + "ws-theta-0.0": 0, + "ws-theta-0.5": 0, + "ws-theta-1.0": 0 + }, + "mean_score": 0, + "resources": { + "submit": "2025-06-26 13:55:38", + "exit_code": 0, + "duration_sec": 171767, + "cpu_pct": 986.8026, + "peak_memory_mb": "NA", + "disk_read_mb": 55319, + "disk_write_mb": 9852 + } + }, + { + "dataset_id": null, + "method_id": "scenicplus", + "metric_values": { + "r1_all": "NA", + "r1_grn": "NA", + "r2-theta-0.0": "NA", + "r2-theta-0.5": "NA", + "r2-theta-1.0": "NA", + "ws-theta-0.0": "NA", + "ws-theta-0.5": "NA", + "ws-theta-1.0": "NA" + }, + "scaled_scores": { + "r1_all": 0, + "r1_grn": 0, + "r2-theta-0.0": 0, + "r2-theta-0.5": 0, + "r2-theta-1.0": 0, + "ws-theta-0.0": 0, + "ws-theta-0.5": 0, + "ws-theta-1.0": 0 + }, + "mean_score": 0, + "resources": { + "submit": "2025-06-27 11:04:34", + "exit_code": 0, + "duration_sec": 71599, + "cpu_pct": 597.5403, + "peak_memory_mb": 805479, + "disk_read_mb": 414106, + "disk_write_mb": 219546 + } + }, + { + "dataset_id": null, + "method_id": "scglue", + "metric_values": { + "r1_all": "NA", + "r1_grn": "NA", + "r2-theta-0.0": "NA", + "r2-theta-0.5": "NA", + "r2-theta-1.0": "NA", + "ws-theta-0.0": "NA", + "ws-theta-0.5": "NA", + "ws-theta-1.0": "NA" + }, + "scaled_scores": { + "r1_all": 0, + "r1_grn": 0, + "r2-theta-0.0": 0, + "r2-theta-0.5": 0, + "r2-theta-1.0": 0, + "ws-theta-0.0": 0, + "ws-theta-0.5": 0, + "ws-theta-1.0": 0 + }, + "mean_score": 0, + "resources": { + "submit": "2025-06-27 11:04:34", + "exit_code": 0, + "duration_sec": 13040, + "cpu_pct": 530.1482, + "peak_memory_mb": "NA", + "disk_read_mb": 90728, + "disk_write_mb": 85813 + } + }, + { + "dataset_id": null, + "method_id": "scprint", + "metric_values": { + "r1_all": "NA", + "r1_grn": "NA", + "r2-theta-0.0": "NA", + "r2-theta-0.5": "NA", + "r2-theta-1.0": "NA", + "ws-theta-0.0": "NA", + "ws-theta-0.5": "NA", + "ws-theta-1.0": "NA" + }, + "scaled_scores": { + "r1_all": 0, + "r1_grn": 0, + "r2-theta-0.0": 0, + "r2-theta-0.5": 0, + "r2-theta-1.0": 0, + "ws-theta-0.0": 0, + "ws-theta-0.5": 0, + "ws-theta-1.0": 0 + }, + "mean_score": 0, + "resources": { + "submit": "2025-06-27 06:44:57", + "exit_code": 0, + "duration_sec": 38717, + "cpu_pct": 14.7543, + "peak_memory_mb": 238592, + "disk_read_mb": 4495, + "disk_write_mb": 644 + } + } +] diff --git a/results/grn/data/state.yaml b/results/grn/data/state.yaml new file mode 100644 index 00000000..abbb0fc1 --- /dev/null +++ b/results/grn/data/state.yaml @@ -0,0 +1,9 @@ +id: process +output_scores: !file results.json +output_method_info: !file method_info.json +output_metric_info: !file metric_info.json +output_dataset_info: !file dataset_info.json +output_task_info: !file task_info.json +output_qc: !file quality_control.json +output_metric_execution_info: !file metric_execution_info.json + diff --git a/results/grn/data/task_info.json b/results/grn/data/task_info.json new file mode 100644 index 00000000..556a0a6b --- /dev/null +++ b/results/grn/data/task_info.json @@ -0,0 +1,50 @@ +{ + "task_id": "task_grn_inference", + "commit_sha": null, + "task_name": "GRN Inference", + "task_summary": "Benchmarking GRN inference methods\nLeaderboard: \n[Performance 
comparison](https://add-grn--openproblems.netlify.app/results/grn_inference/)\n\nArticle: [geneRNIB: a living benchmark for gene regulatory network inference](https://www.biorxiv.org/content/10.1101/2025.02.25.640181v1)\n\nDocumentation: \n[geneRNIB-doc](https://genernib-documentation.readthedocs.io/en/latest/)\n\nRepository:\n[openproblems-bio/task_grn_inference](https://github.com/openproblems-bio/task_grn_inference)\n\nIf you use this framework, please cite it as\n@article{nourisa2025genernib,\n title={geneRNIB: a living benchmark for gene regulatory network inference},\n author={Nourisa, Jalil and Passemiers, Antoine and Stock, Marco and Zeller-Plumhoff, Berit and Cannoodt, Robrecht and Arnold, Christian and Tong, Alexander and Hartford, Jason and Scialdone, Antonio and Moreau, Yves and others},\n journal={bioRxiv},\n pages={2025--02},\n year={2025},\n publisher={Cold Spring Harbor Laboratory}\n}\n", + "task_description": "\ngeneRNIB is a living benchmark platform for GRN inference. This platform provides curated datasets for GRN inference and evaluation, standardized evaluation protocols and metrics, computational infrastructure, and a dynamically updated leaderboard to track state-of-the-art methods. It runs novel GRNs in the cloud, offers competition scores, and stores them for future comparisons, reflecting new developments over time.\n\nThe platform supports the integration of new inference methods, datasets and protocols. When a new feature is added, previously evaluated GRNs are re-assessed, and the leaderboard is updated accordingly. The aim is to evaluate both the accuracy and completeness of inferred GRNs. It is designed for both single-modality and multi-omics GRN inference. \n\nIn the current version, geneRNIB contains 10 inference methods including both single and multi-omics, 8 evaluation metrics, and 5 datasets. \n\nSee our publication for details of the methods. \n", + "repo": "https://github.com/openproblems-bio/task_grn_inference", + "issue_tracker": "https://github.com/openproblems-bio/task_grn_inference/issues", + "authors": [ + { + "name": "Jalil Nourisa", + "roles": "author", + "info": { + "github": "janursa", + "orcid": "0000-0002-7539-4396" + } + }, + { + "name": "Robrecht Cannoodt", + "roles": "author", + "info": { + "github": "rcannood", + "orcid": "0000-0003-3641-729X" + } + }, + { + "name": "Antoine Passemiers", + "roles": "contributor", + "info": { + "github": "AntoinePassemiers" + } + }, + { + "name": "Marco Stock", + "roles": "contributor", + "info": { + "github": "stkmrc" + } + }, + { + "name": "Christian Arnold", + "roles": "contributor", + "info": { + "github": "chrarnold" + } + } + ], + "version": "build_main", + "license": "MIT" +} diff --git a/results/grn/index.qmd b/results/grn/index.qmd new file mode 100644 index 00000000..e69de29b
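Reviewer note (not part of the patch): each entry in the quality_control.json added above pairs a check expression (the "code" field, e.g. worst_score >= -1) with the observed "value", a "severity" flag, and a human-readable "message". The sketch below is a minimal, illustrative way to surface flagged checks when inspecting the file locally; the path is taken from this patch, and treating a non-zero severity as a violation is an assumption on my side, not something defined by the benchmark code.

    # Illustrative sketch only: list quality-control checks with non-zero severity.
    # Assumes the path added in this patch and that severity > 0 marks a violation.
    import json

    with open("results/grn/data/quality_control.json") as f:
        checks = json.load(f)

    flagged = [c for c in checks if c["severity"] > 0]
    for c in flagged:
        print(f'{c["name"]}: {c["code"]} (value={c["value"]}, severity={c["severity"]})')
    print(f"{len(flagged)} of {len(checks)} checks flagged")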
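Similarly, results.json stores one record per (dataset_id, method_id) with raw "metric_values", "scaled_scores", and a per-dataset "mean_score"; records with "dataset_id": null carry only the "resources" trace (submit time, duration, CPU, memory, disk I/O). The snippet below is a minimal sketch of how these records could be aggregated into a per-method ranking for a quick sanity check, assuming the path from this patch and using pandas purely for convenience; the actual leaderboard is rendered by the results website.

    # Illustrative sketch only: aggregate per-dataset mean scores into a method ranking.
    import json
    import pandas as pd

    with open("results/grn/data/results.json") as f:
        records = json.load(f)

    # Records with dataset_id == null only carry resource usage; skip them for scoring.
    rows = [
        {"dataset_id": r["dataset_id"], "method_id": r["method_id"], "mean_score": r["mean_score"]}
        for r in records
        if r["dataset_id"] is not None
    ]

    leaderboard = (
        pd.DataFrame(rows)
        .groupby("method_id")["mean_score"]
        .mean()
        .sort_values(ascending=False)
    )
    print(leaderboard)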