TypeError: cannot pickle '_thread.RLock' object with ProcessPoolExecutor #53

@samuelcolvin

Description

When trying to use ProcessPoolExecutor, I get an error:

code/pydantic-ai 130 ➤ uv run pytest tests/test_examples.py -k graph --update-examples     
============================================================================================= test session starts ==============================================================================================
platform darwin -- Python 3.12.6, pytest-8.3.3, pluggy-1.5.0
rootdir: /Users/samuel/code/pydantic-ai
configfile: pyproject.toml
plugins: inline-snapshot-0.14.0, logfire-2.6.2, devtools-0.12.2, examples-0.0.15, mock-3.14.0, pretty-1.2.0, anyio-4.6.2.post1
collected 118 items / 101 deselected / 17 selected                                                                                                                                                             

tests/test_examples.py ..........F                                                                                                                                                                       [ 64%]
tests/test_examples.py:55 test_docs_examples[docs/graph.md:643-709] - TypeError: cannot pickle '_thread.RLock' object…
tests/test_examples.py s.....                                                                                                                                                                            [100%]

=================================================================================================== FAILURES ===================================================================================================
__________________________________________________________________________________ test_docs_examples[docs/graph.md:643-709] ___________________________________________________________________________________

example = CodeExample(source='from __future__ import annotations\n\nimport asyncio\nfrom concurrent.futures.process import Proce...D('99e16cec-fcc7-44ff-973e-449f8e7bc3d8'), test_id='tests/test_examples.py::test_docs_examples[docs/graph.md:643-709]')
eval_example = <pytest_examples.eval_example.EvalExample object at 0x109d66c00>, mocker = <pytest_mock.plugin.MockerFixture object at 0x109d66ae0>
client_with_handler = <async_generator object client_with_handler at 0x109d02180>, env = <tests.conftest.TestEnv object at 0x109d668d0>
tmp_path = PosixPath('/private/var/folders/s6/mh0x1z6d6mqclzdtv72zgx_40000gn/T/pytest-of-samuel/pytest-782/test_docs_examples_docs_graph_10'), set_event_loop = None

    @pytest.mark.parametrize('example', find_filter_examples(), ids=str)
    def test_docs_examples(
        example: CodeExample,
        eval_example: EvalExample,
        mocker: MockerFixture,
        client_with_handler: ClientWithHandler,
        env: TestEnv,
        tmp_path: Path,
        set_event_loop: None,
    ):
        mocker.patch('pydantic_ai.agent.models.infer_model', side_effect=mock_infer_model)
        mocker.patch('pydantic_ai._utils.group_by_temporal', side_effect=mock_group_by_temporal)
        mocker.patch('pydantic_ai.models.vertexai._creds_from_file', return_value=MockCredentials())
    
        mocker.patch('httpx.Client.get', side_effect=http_request)
        mocker.patch('httpx.Client.post', side_effect=http_request)
        mocker.patch('httpx.AsyncClient.get', side_effect=async_http_request)
        mocker.patch('httpx.AsyncClient.post', side_effect=async_http_request)
        mocker.patch('random.randint', return_value=4)
        mocker.patch('rich.prompt.Prompt.ask', side_effect=rich_prompt_ask)
    
        env.set('OPENAI_API_KEY', 'testing')
        env.set('GEMINI_API_KEY', 'testing')
        env.set('GROQ_API_KEY', 'testing')
    
        sys.path.append('tests/example_modules')
    
        prefix_settings = example.prefix_settings()
        opt_title = prefix_settings.get('title')
        opt_test = prefix_settings.get('test', '')
        opt_lint = prefix_settings.get('lint', '')
        noqa = prefix_settings.get('noqa', '')
        python_version = prefix_settings.get('py', None)
    
        if python_version:
            python_version_info = tuple(int(v) for v in python_version.split('.'))
            if sys.version_info < python_version_info:
                pytest.skip(f'Python version {python_version} required')
    
        cwd = Path.cwd()
    
        if opt_test.startswith('skip') and opt_lint.startswith('skip'):
            pytest.skip('both running code and lint skipped')
    
        if opt_title == 'sql_app_evals.py':
            os.chdir(tmp_path)
            examples = [{'request': f'sql prompt {i}', 'sql': f'SELECT {i}'} for i in range(15)]
            with (tmp_path / 'examples.json').open('w') as f:
                json.dump(examples, f)
    
        ruff_ignore: list[str] = ['D', 'Q001']
        # `from bank_database import DatabaseConn` wrongly sorted in imports
        # waiting for https://github.com/pydantic/pytest-examples/issues/43
        # and https://github.com/pydantic/pytest-examples/issues/46
        if 'import DatabaseConn' in example.source:
            ruff_ignore.append('I001')
    
        if noqa:
            ruff_ignore.extend(noqa.upper().split())
    
        line_length = int(prefix_settings.get('line_length', '88'))
    
        eval_example.set_config(ruff_ignore=ruff_ignore, target_version='py39', line_length=line_length)
        eval_example.print_callback = print_callback
    
        call_name = prefix_settings.get('call_name', 'main')
    
        if not opt_lint.startswith('skip'):
            if eval_example.update_examples:  # pragma: no cover
                eval_example.format(example)
            else:
                eval_example.lint(example)
    
        if opt_test.startswith('skip'):
            print(opt_test[4:].lstrip(' -') or 'running code skipped')
        else:
            if eval_example.update_examples:  # pragma: no cover
>               module_dict = eval_example.run_print_update(example, call=call_name)

tests/test_examples.py:133: 
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ 
docs/graph.md:694: in main
    result, history = await fives_graph.run(DivisibleBy5(3), deps=deps)
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ 

    async def run(self, ctx: GraphContext) -> DivisibleBy5:
        loop = asyncio.get_running_loop()
>       compute_result = await loop.run_in_executor(
E       TypeError: cannot pickle '_thread.RLock' object

docs/graph.md:678: TypeError
=============================================================================================== inline snapshot ================================================================================================
                                                   Summary of Failures                                                   
┏━━━━━━━━━━━━━━━━━━━━━━━━━━┳━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━┳━━━━━━━━━━━━━━━━━┳━━━━━━━━━━━━━━┳━━━━━━━━━━━━━┓
┃  File                    ┃  Function                                   ┃  Function Line  ┃  Error Line  ┃  Error      ┃
┡━━━━━━━━━━━━━━━━━━━━━━━━━━╇━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━╇━━━━━━━━━━━━━━━━━╇━━━━━━━━━━━━━━╇━━━━━━━━━━━━━┩
│  tests/test_examples.py  │  test_docs_examples[docs/graph.md:643-709]  │  56             │  133         │  TypeError  │
└──────────────────────────┴─────────────────────────────────────────────┴─────────────────┴──────────────┴─────────────┘
Results (0.79s):
         1 failed
        15 passed
         1 skipped
       101 deselected
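
For context on why this fails: ProcessPoolExecutor pickles every argument it ships to a worker process, so the error means something reachable from the submitted call (presumably the graph context or deps in the docs example) holds a `_thread.RLock`, which cannot be pickled. Below is a minimal sketch, independent of the docs/graph.md example and using hypothetical `Deps`/`compute` names, that reproduces the same TypeError:

```python
# Minimal sketch (hypothetical Deps/compute names, not the docs example):
# any object holding a threading.RLock fails to cross the process boundary.
import asyncio
import threading
from concurrent.futures import ProcessPoolExecutor


class Deps:
    """Hypothetical dependency holder; the RLock makes instances unpicklable."""

    def __init__(self) -> None:
        self.lock = threading.RLock()


def compute(deps: Deps, x: int) -> int:
    return x * 5


async def main() -> None:
    loop = asyncio.get_running_loop()
    deps = Deps()
    with ProcessPoolExecutor() as executor:
        # the arguments are pickled on their way to the worker, so this raises:
        # TypeError: cannot pickle '_thread.RLock' object
        await loop.run_in_executor(executor, compute, deps, 3)


if __name__ == '__main__':
    asyncio.run(main())
```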

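And a hedged workaround sketch (not necessarily the fix that should land in the docs example): keep unpicklable dependencies in the parent process and pass only plain, picklable values across the process boundary.

```python
# Workaround sketch: send only picklable values (here a plain int) to the
# worker; anything holding locks, clients, etc. stays in the parent process.
import asyncio
from concurrent.futures import ProcessPoolExecutor


def compute(x: int) -> int:
    # a module-level function with picklable arguments is safe to run in a worker
    return x * 5


async def main() -> None:
    loop = asyncio.get_running_loop()
    with ProcessPoolExecutor() as executor:
        result = await loop.run_in_executor(executor, compute, 3)
        print(result)  # 15


if __name__ == '__main__':
    asyncio.run(main())
```
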
Ref pydantic/pydantic-ai#693
