From 8e16b497767fe71df16f222b8c6744d1980709dd Mon Sep 17 00:00:00 2001
From: "Laurent Mignon (ACSONE)"
Date: Wed, 25 Jun 2025 10:32:43 +0200
Subject: [PATCH] [IMP] rest_log: Don't log retried errors

If a retryable error occurs, it should only be logged once the retrying
mechanism will no longer retry it. This avoids logging errors for
requests that ultimately succeed for the user thanks to Odoo's retry
mechanism.
---
 rest_log/components/service.py    | 38 +++++++++++++++++++++++-------
 rest_log/tests/test_db_logging.py | 39 ++++++++++++++++++++++---------
 2 files changed, 58 insertions(+), 19 deletions(-)

diff --git a/rest_log/components/service.py b/rest_log/components/service.py
index 9aedbf11b..527714fc6 100644
--- a/rest_log/components/service.py
+++ b/rest_log/components/service.py
@@ -3,6 +3,7 @@
 # @author Simone Orsi
 # License LGPL-3.0 or later (http://www.gnu.org/licenses/lgpl.html).
 
+import inspect
 import json
 import logging
 import traceback
@@ -93,9 +94,37 @@ def _log_dispatch_success(self, method_name, result, *args, params=None):
         except Exception as e:
             _logger.exception("Rest Log Error Creation: %s", e)
 
+    def _is_request_will_be_retried(self):
+        # We need to analyze the current call stack to determine if
+        # a 'tryleft' local variable is set, which indicates that
+        # the current request is being retried.
+        # This is a workaround for the fact that we cannot access
+        # the 'tryleft' variable from the request context, as it is
+        # not part of the request object.
+        # This is a hack, but it is the only way to determine if
+        # the current request is being retried.
+        stack = inspect.stack()
+        for frame in stack:
+            if "tryleft" in frame.frame.f_locals:
+                return frame.frame.f_locals["tryleft"]
+        return False
+
     def _dispatch_exception(
         self, method_name, exception_klass, orig_exception, *args, params=None
     ):
+        must_return_original_exception = False
+        if (
+            isinstance(orig_exception, OperationalError)
+            and orig_exception.pgcode in PG_CONCURRENCY_ERRORS_TO_RETRY
+        ):
+            must_return_original_exception = True
+            if self._is_request_will_be_retried():
+                # The request will be retried, so don't log the error in the
+                # database: it will be logged on the last attempt if it still
+                # fails. Let the OperationalError bubble up to the retrying
+                # mechanism, where the default handler takes care of it.
+                raise orig_exception
+
         exc_msg, log_entry_url = None, None  # in case it fails below
         try:
             exc_msg = self._get_exception_message(orig_exception)
@@ -113,14 +142,7 @@ def _dispatch_exception(
             log_entry_url = self._get_log_entry_url(log_entry)
         except Exception as e:
             _logger.exception("Rest Log Error Creation: %s", e)
-        # let the OperationalError bubble up to the retrying mechanism
-        # We can't wrap the OperationalError because we want to let it
-        # bubble up to the retrying mechanism, it will be handled by
-        # the default handler at the end of the chain.
-        if (
-            isinstance(orig_exception, OperationalError)
-            and orig_exception.pgcode in PG_CONCURRENCY_ERRORS_TO_RETRY
-        ):
+        if must_return_original_exception:
             raise orig_exception
         raise exception_klass(exc_msg, log_entry_url) from orig_exception
 
diff --git a/rest_log/tests/test_db_logging.py b/rest_log/tests/test_db_logging.py
index 998f0c8e0..cb7c142af 100644
--- a/rest_log/tests/test_db_logging.py
+++ b/rest_log/tests/test_db_logging.py
@@ -390,7 +390,9 @@ def tearDownClass(cls):
         cls._teardown_registry(cls)
         super().tearDownClass()
 
-    def _test_exception(self, test_type, wrapping_exc, exc_name, severity):
+    def _test_exception(
+        self, test_type, wrapping_exc, exc_name, severity, will_be_retried=False
+    ):
         log_model = self.env["rest.log"].sudo()
         initial_entries = log_model.search([])
         # Context: we are running in a transaction case which uses savepoints.
@@ -400,7 +402,9 @@ def _test_exception(self, test_type, wrapping_exc, exc_name, severity):
         # Init fake collection w/ new env
         collection = _PseudoCollection(self._collection_name, new_env)
         service = self._get_service(self, collection=collection)
-        with self._get_mocked_request(env=new_env):
+        with self._get_mocked_request(env=new_env), mock.patch.object(
+            service, "_is_request_will_be_retried", return_value=will_be_retried
+        ):
             try:
                 service.dispatch("fail", test_type)
             except Exception as err:
@@ -413,15 +417,20 @@ def _test_exception(self, test_type, wrapping_exc, exc_name, severity):
         with new_rollbacked_env() as new_env:
             log_model = new_env["rest.log"].sudo()
             entry = log_model.search([]) - initial_entries
-            expected = {
-                "collection": service._collection,
-                "state": "failed",
-                "result": "null",
-                "exception_name": exc_name,
-                "exception_message": "Failed as you wanted!",
-                "severity": severity,
-            }
-            self.assertRecordValues(entry, [expected])
+            if will_be_retried:
+                # retryable error must bubble up to the retrying mechanism
+                self.assertEqual(len(entry), 0)
+            else:
+                # no retry left, so the error is logged
+                expected = {
+                    "collection": service._collection,
+                    "state": "failed",
+                    "result": "null",
+                    "exception_name": exc_name,
+                    "exception_message": "Failed as you wanted!",
+                    "severity": severity,
+                }
+                self.assertRecordValues(entry, [expected])
 
     @staticmethod
     def _get_test_controller(class_or_instance, root_path=None):
@@ -436,4 +445,12 @@ def test_log_exception_retryable(self):
             FakeConcurrentUpdateError,
             "odoo.addons.rest_log.tests.common.FakeConcurrentUpdateError",
             "warning",
+            will_be_retried=True,
+        )
+        self._test_exception(
+            "retryable",
+            FakeConcurrentUpdateError,
+            "odoo.addons.rest_log.tests.common.FakeConcurrentUpdateError",
+            "warning",
+            will_be_retried=False,
         )
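
Below is a minimal, standalone sketch of the stack-inspection technique the new
_is_request_will_be_retried helper relies on. It runs outside Odoo:
fake_retrying and handler are hypothetical stand-ins for the retry loop and a
dispatched endpoint, and the only assumption carried over from the patch is
that the surrounding retry loop keeps a 'tryleft' local variable.

    import inspect


    def is_request_will_be_retried():
        # Walk up the call stack looking for a frame that defines a
        # 'tryleft' local; return its value (remaining tries) if found.
        for frame_info in inspect.stack():
            if "tryleft" in frame_info.frame.f_locals:
                return frame_info.frame.f_locals["tryleft"]
        return False


    def fake_retrying(func, max_tries=3):
        # Hypothetical stand-in for a retry loop that keeps a 'tryleft'
        # local, which is what the stack inspection above detects.
        tryleft = max_tries
        while tryleft:
            tryleft -= 1
            try:
                return func()
            except RuntimeError:
                if not tryleft:
                    raise


    def handler():
        # Truthy while retries remain, falsy on the last attempt.
        print("retries left:", is_request_will_be_retried())
        return "ok"


    fake_retrying(handler)               # prints "retries left: 2"
    print(is_request_will_be_retried())  # False: no retry loop in the stack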