Skip to content
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension


Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
25 changes: 12 additions & 13 deletions compose_runner/run.py
Original file line number Diff line number Diff line change
Expand Up @@ -80,8 +80,8 @@ def __init__(
self.cached_studyset = None
self.cached_annotation = None
self.cached_specification = None
self.first_dataset = None
self.second_dataset = None
self.first_studyset = None
self.second_studyset = None
self.estimator = None
self.corrector = None

Expand Down Expand Up @@ -292,13 +292,9 @@ def process_bundle(self, n_cores=None):
studyset = Studyset(self.cached_studyset)
annotation = Annotation(self.cached_annotation, studyset)
first_studyset, second_studyset = self.apply_filter(studyset, annotation)
first_dataset = first_studyset.to_dataset()
second_dataset = (
second_studyset.to_dataset() if second_studyset is not None else None
)
estimator, corrector = self.load_specification(n_cores=n_cores)
self.first_dataset = first_dataset
self.second_dataset = second_dataset
self.first_studyset = first_studyset
self.second_studyset = second_studyset
self.estimator = estimator
self.corrector = corrector

Expand All @@ -323,25 +319,28 @@ def create_result_object(self):
raise ValueError(f"Could not create result for {self.meta_analysis_id}")

def run_meta_analysis(self):
if self.second_dataset and isinstance(self.estimator, PairwiseCBMAEstimator):
if self.second_studyset and isinstance(self.estimator, PairwiseCBMAEstimator):
workflow = PairwiseCBMAWorkflow(
estimator=self.estimator,
corrector=self.corrector,
diagnostics="focuscounter",
output_dir=self.result_dir,
)
self.meta_results = workflow.fit(self.first_dataset, self.second_dataset)
elif self.second_dataset is None and isinstance(self.estimator, CBMAEstimator):
self.meta_results = workflow.fit(
self.first_studyset,
self.second_studyset,
)
elif self.second_studyset is None and isinstance(self.estimator, CBMAEstimator):
workflow = CBMAWorkflow(
estimator=self.estimator,
corrector=self.corrector,
diagnostics="focuscounter",
output_dir=self.result_dir,
)
self.meta_results = workflow.fit(self.first_dataset, self.second_dataset)
self.meta_results = workflow.fit(self.first_studyset)
else:
raise ValueError(
f"Estimator {self.estimator} and datasets {self.first_dataset} and {self.second_dataset} are not compatible."
f"Estimator {self.estimator} and studysets {self.first_studyset} and {self.second_studyset} are not compatible."
)
self._persist_meta_results()

Expand Down
46 changes: 40 additions & 6 deletions compose_runner/tests/test_cli.py
Original file line number Diff line number Diff line change
@@ -1,13 +1,47 @@
from click.testing import CliRunner

from compose_runner import cli as cli_module
from compose_runner.cli import cli


def test_cli():
def test_cli(monkeypatch):
calls = {}

def fake_run(meta_analysis_id, environment, result_dir, nsc_key, nv_key, no_upload, n_cores):
calls["args"] = {
"meta_analysis_id": meta_analysis_id,
"environment": environment,
"result_dir": result_dir,
"nsc_key": nsc_key,
"nv_key": nv_key,
"no_upload": no_upload,
"n_cores": n_cores,
}
return "https://example.org/result", None

monkeypatch.setattr(cli_module, "run", fake_run)

runner = CliRunner()
result = runner.invoke(cli, [
"4nBwrGsqVWtt",
'--environment', "staging",
"--n-cores", 1,
"--no-upload"])
result = runner.invoke(
cli,
[
"3opENJpHxRsH",
"--environment",
"staging",
"--n-cores",
1,
"--no-upload",
],
)

assert result.exit_code == 0
assert calls["args"] == {
"meta_analysis_id": "3opENJpHxRsH",
"environment": "staging",
"result_dir": None,
"nsc_key": None,
"nv_key": None,
"no_upload": True,
"n_cores": 1,
}
assert "https://example.org/result" in result.output
116 changes: 115 additions & 1 deletion compose_runner/tests/test_run.py
Original file line number Diff line number Diff line change
@@ -1,9 +1,9 @@
import pytest
from requests.exceptions import HTTPError

from compose_runner import run as run_module
from compose_runner.run import Runner


@pytest.mark.vcr(record_mode="once")
def test_incorrect_id():
runner = Runner(
Expand Down Expand Up @@ -62,6 +62,120 @@ def test_run_string_group_comparison_workflow():
)
runner.run_workflow()


def test_process_bundle_keeps_studysets(monkeypatch):
    """process_bundle should stash the filtered studysets and the loaded
    estimator/corrector directly on the Runner (no dataset conversion)."""
    sentinel_first, sentinel_second = object(), object()
    sentinel_estimator, sentinel_corrector = object(), object()

    class StubStudyset:
        def __init__(self, source):
            self.source = source

    class StubAnnotation:
        def __init__(self, source, studyset):
            self.source = source
            self.studyset = studyset

    def stub_apply_filter(self, studyset, annotation):
        # process_bundle must hand over the wrapped studyset/annotation objects.
        assert isinstance(studyset, StubStudyset)
        assert isinstance(annotation, StubAnnotation)
        return sentinel_first, sentinel_second

    def stub_load_specification(self, n_cores=None):
        # n_cores must be forwarded unchanged from the process_bundle call.
        assert n_cores == 3
        return sentinel_estimator, sentinel_corrector

    monkeypatch.setattr(run_module, "Studyset", StubStudyset)
    monkeypatch.setattr(run_module, "Annotation", StubAnnotation)
    monkeypatch.setattr(Runner, "apply_filter", stub_apply_filter)
    monkeypatch.setattr(Runner, "load_specification", stub_load_specification)

    runner = Runner(meta_analysis_id="made_up_id", environment="staging")
    runner.cached_studyset = {"id": "studyset", "studies": []}
    runner.cached_annotation = {"note_keys": {}}

    runner.process_bundle(n_cores=3)

    # The exact sentinel objects must land on the runner, untouched.
    assert runner.first_studyset is sentinel_first
    assert runner.second_studyset is sentinel_second
    assert runner.estimator is sentinel_estimator
    assert runner.corrector is sentinel_corrector


def test_run_meta_analysis_single_studyset_uses_cbma_workflow(monkeypatch, tmp_path):
    """With no second studyset, run_meta_analysis must route through
    CBMAWorkflow and fit on the first studyset alone."""
    recorded = {}

    class StubCBMAEstimator:
        pass

    class WorkflowSpy:
        def __init__(self, estimator, corrector, diagnostics, output_dir):
            recorded["init"] = {
                "estimator": estimator,
                "corrector": corrector,
                "diagnostics": diagnostics,
                "output_dir": output_dir,
            }

        def fit(self, dataset):
            recorded["fit"] = {"dataset": dataset}
            return "meta-results"

    monkeypatch.setattr(run_module, "CBMAEstimator", StubCBMAEstimator)
    monkeypatch.setattr(run_module, "CBMAWorkflow", WorkflowSpy)

    runner = Runner(
        meta_analysis_id="made_up_id", environment="staging", result_dir=tmp_path
    )
    runner.first_studyset = object()
    runner.second_studyset = None
    runner.estimator = StubCBMAEstimator()
    runner.corrector = object()
    # Avoid touching the filesystem / network in the persistence step.
    runner._persist_meta_results = lambda: None

    runner.run_meta_analysis()

    assert recorded["fit"] == {"dataset": runner.first_studyset}
    assert runner.meta_results == "meta-results"


def test_run_meta_analysis_pairwise_uses_pairwise_workflow(monkeypatch, tmp_path):
    """With two studysets and a pairwise estimator, run_meta_analysis must
    route through PairwiseCBMAWorkflow and fit on both studysets."""
    recorded = {}

    class StubPairwiseEstimator:
        pass

    class WorkflowSpy:
        def __init__(self, estimator, corrector, diagnostics, output_dir):
            recorded["init"] = {
                "estimator": estimator,
                "corrector": corrector,
                "diagnostics": diagnostics,
                "output_dir": output_dir,
            }

        def fit(self, dataset1, dataset2):
            recorded["fit"] = {"dataset1": dataset1, "dataset2": dataset2}
            return "pairwise-results"

    monkeypatch.setattr(run_module, "PairwiseCBMAEstimator", StubPairwiseEstimator)
    monkeypatch.setattr(run_module, "PairwiseCBMAWorkflow", WorkflowSpy)

    runner = Runner(
        meta_analysis_id="made_up_id", environment="staging", result_dir=tmp_path
    )
    runner.first_studyset = object()
    runner.second_studyset = object()
    runner.estimator = StubPairwiseEstimator()
    runner.corrector = object()
    # Avoid touching the filesystem / network in the persistence step.
    runner._persist_meta_results = lambda: None

    runner.run_meta_analysis()

    # Both studysets must be passed through, in order.
    assert recorded["fit"] == {
        "dataset1": runner.first_studyset,
        "dataset2": runner.second_studyset,
    }
    assert runner.meta_results == "pairwise-results"

# def test_yifan_workflow():
# runner = Runner(
# meta_analysis_id="4WELjap2yCJm",
Expand Down
2 changes: 1 addition & 1 deletion pyproject.toml
Original file line number Diff line number Diff line change
Expand Up @@ -14,7 +14,7 @@ classifiers = [
"Programming Language :: Python :: 3",
]
dynamic = ["version"]
dependencies = ["nimare>=0.11.0", "click", "sentry-sdk", "numpy"]
dependencies = ["nimare>=0.11.1", "click", "sentry-sdk", "numpy"]

[project.urls]
Repository = "https://github.com/neurostuff/compose-runner"
Expand Down
Loading