diff --git a/src/methods/scgpt_finetuned/config.vsh.yaml b/src/methods/scgpt_finetuned/config.vsh.yaml
index 2b949cbb..17476c3c 100644
--- a/src/methods/scgpt_finetuned/config.vsh.yaml
+++ b/src/methods/scgpt_finetuned/config.vsh.yaml
@@ -49,15 +49,7 @@ resources:
 engines:
   - type: docker
     image: openproblems/base_pytorch_nvidia:1
-    # TODO: Try to find working installation of flash attention (flash-attn<1.0.5)
     setup:
-      #- type: python
-      #  pypi:
-      #    - gdown
-      #    - scgpt # Install from PyPI to get dependencies
-      #- type: docker
-      #  # Force re-installing from GitHub to get bug fixes
-      #  run: pip install --upgrade --no-deps --force-reinstall git+https://github.com/bowang-lab/scGPT.git
       - type: docker
         run: |
           git clone https://github.com/bowang-lab/scGPT && \
diff --git a/src/methods/scgpt_zeroshot/config.vsh.yaml b/src/methods/scgpt_zeroshot/config.vsh.yaml
index 3ff6425c..cea521b0 100644
--- a/src/methods/scgpt_zeroshot/config.vsh.yaml
+++ b/src/methods/scgpt_zeroshot/config.vsh.yaml
@@ -51,15 +51,7 @@ resources:
 engines:
   - type: docker
     image: openproblems/base_pytorch_nvidia:1
-    # TODO: Try to find working installation of flash attention (flash-attn<1.0.5)
     setup:
-      #- type: python
-      #  pypi:
-      #    - gdown
-      #    - scgpt # Install from PyPI to get dependencies
-      #- type: docker
-      #  # Force re-installing from GitHub to get bug fixes
-      #  run: pip install --upgrade --no-deps --force-reinstall git+https://github.com/bowang-lab/scGPT.git
       - type: docker
         run: |
           git clone https://github.com/bowang-lab/scGPT && \
diff --git a/src/methods/scprint/config.vsh.yaml b/src/methods/scprint/config.vsh.yaml
index 0e3020e5..0a9a6c7d 100644
--- a/src/methods/scprint/config.vsh.yaml
+++ b/src/methods/scprint/config.vsh.yaml
@@ -61,7 +61,7 @@ arguments:
   - name: --max_len
     type: integer
     description: The maximum length of the gene sequence.
-    default: 4000
+    default: 2300
 
 resources:
   - type: python_script
@@ -86,7 +86,7 @@ engines:
         script: from scdataloader.utils import populate_my_ontology; populate_my_ontology()
 runners:
   - type: executable
-    # docker_run_args: --gpus all 
+    # docker_run_args: --gpus all
   - type: nextflow
     directives:
       label: [hightime, highmem, midcpu, gpu, highsharedmem]