diff --git a/etc/openqa/openqa.ini.20250512_222339.bak b/etc/openqa/openqa.ini.20250512_222339.bak
new file mode 100644
index 00000000000..e428f56171c
--- /dev/null
+++ b/etc/openqa/openqa.ini.20250512_222339.bak
@@ -0,0 +1,368 @@
+[global]
+## Web site name for tab titles and bookmarks
+#appname = openQA
+
+## type of branding - [ openSUSE, plain, openqa.suse.de ]
+#branding = plain
+
+## type and location of needle repo
+#scm = git
+
+## space-separated list of domains from which asset download with
+## _URL params is allowed. Matched at the end of the hostname in
+## the URL. With these values downloads from opensuse.org,
+## dl.fedoraproject.org, and a.b.c.opensuse.org are allowed; downloads
+## from moo.net, dl.opensuse and fedoraproject.org.evil are not.
+## The default is undefined, meaning asset download is *disabled*; you
+## must set this option to enable it
+#download_domains = fedoraproject.org opensuse.org
+
+## set if you have a local repo mirror
+#suse_mirror = http://FIXME
+
+## base url [default: $self->req->url->base]
+#base_url = http://FIXME
+
+## days for Strict-Transport-Security, 0 to not add this header
+## http://en.wikipedia.org/wiki/Strict-Transport-Security
+#hsts = 365
+
+## Set to 0 to disable auditing backend
+#audit_enabled = 1
+
+## Set to 1 to enable profiling
+## * Needs Mojolicious::Plugin::NYTProf
+## * Profiling data will be PUBLICLY accessible under /nytprof route.
+## * The plugin impairs performance and the generated profiling data might quickly
+## utilize a lot of disk space. So don't enable this plugin in production.
+#profiling_enabled = 0
+
+## Set to 1 to enable monitoring
+## * Needs Mojolicious::Plugin::Status
+## * Monitoring will be accessible to operators and admins under /monitoring route.
+## * The plugin can impair performance significantly with prefork workers enabled.
+## So don't enable this plugin in production.
+#monitoring_enabled = 0
+
+## space-separated list of extra plugins to load; the plugin must be under
+## OpenQA::WebAPI::Plugin and its correctly-cased module name given here;
+## this example loads OpenQA::WebAPI::Plugin::AMQP
+#plugins = AMQP
+
+## space-separated list of asset types *not* to show links for in the
+## web UI. Default is 'repo'
+#hide_asset_types = repo iso
+
+## space-separated list of domains recognized by job labeling
+## If openQA detects (via 'Referer' header) that a test job was accessed from a
+## domain on that list, this job is labeled as linked and considered important.
+#recognized_referers = bugzilla.suse.com bugzilla.opensuse.org progress.opensuse.org github.com
+
+## A regex in a string of test settings to ignore for "job investigation"
+#job_investigate_ignore = "(JOBTOKEN|NAME)"
+
+## Specify the number of seconds until an unresponsive worker is considered offline
+## and its currently assigned jobs are taken care of by the stale job detection
+#worker_timeout = 1800
+
+## Timeout for the git command in "job investigation"
+#job_investigate_git_timeout = 20
+
+## Limit for the git log --stat in "job investigation"
+#job_investigate_git_log_limit = 200
+
+
+## Gracefully restart the prefork workers if they reach a certain memory limit (in kB)
+#max_rss_limit = 180000
+
+## Causes jobs reported as incomplete by the worker to be cloned automatically when the
+## reason matches; set to 0 to disable
+#auto_clone_regex = ^(cache failure: |terminated prematurely: |api failure: Failed to register .* 503|backend died: .*VNC.*(timeout|timed out|refused)|QEMU terminated: Failed to allocate KVM HPT of order 25.* Cannot allocate memory)
+## The maximum number of times a job will be restarted if the auto_clone_regex matches
+#auto_clone_limit = 20
+
+## A regex pattern that a "force_result" label description in job comments
+## must match to be accepted. By default this is undefined, meaning no rules are applied
+## and no description is expected
+#force_result_regex =
+
+## Job results to collapse by default as parallel children on the test result
+## overview page (defaults to passed/softfailed, set to empty value to disable
+## collapsing parallel children by default completely)
+#parallel_children_collapsable_results = passed softfailed
+
+## For the developer mode panel: if not accessing the web UI via Apache/NGINX reverse
+## proxy, then connect to the livehandler daemon at the web UI port + service_port_delta.
+## The livehandler daemon is supposed to run under the web UI port + 2.
+## If you want to keep using a reverse proxy while accessing the web UI through a custom port
+## (e.g. 8080) then just set `service_port_delta = 0`.
+#service_port_delta = 2
+
+## Allowed time difference in hmac validation in seconds.
+## Higher values introduce higher risks for replay attacks but make API requests more
+## resilient in case of a high load on the web UI. Lower values reduce this risk but
+## can cause jobs to incomplete with "timestamp mismatch" error messages.
+#api_hmac_time_tolerance = 300
+
+#[scm git]
+## name of remote to get updates from before committing changes (e.g. origin, leave out-commented to disable remote update)
+#update_remote = origin
+## name of branch to rebase against before committing changes (e.g. origin/master, leave out-commented to disable rebase)
+#update_branch = origin/master
+## whether to do a hard reset of the local repository before rebasing
+#do_cleanup = no
+## whether committed changes should be pushed
+#do_push = no
+## whether to clone CASEDIR or NEEDLES_DIR on the web UI if that var points to a Git repo
+#git_auto_clone = yes
+## enable automatic updates of all test code and needles managed via Git
+#git_auto_update = yes
+## specifies how to handle errors on automatic updates via git_auto_update
+## - when set to "best-effort" openQA jobs are started even if the update failed
+## - when set to "strict" openQA jobs will be blocked until the update succeeds, or set to incomplete when the retries for updating are exhausted
+#git_auto_update_method = best-effort
+## whether openQA should attempt to display needles according to the original commits in the web UI
+## on test result step view
+## warning: This causes extra Git checkouts and increased response times of the test result step view.
+## You probably also want to enable `openqa-enqueue-needle-ref-cleanup.service`.
+#checkout_needles_sha = no
+## whether openQA should fetch needles (via checkout_needles_sha) from foreign remote URLs
+## warning: this will hit performance even further and fetching from arbitrary URLs might have security aspects
+#allow_arbitrary_url_fetch = no
+## retention for storing temporary needle refs in minutes
+#temp_needle_refs_retention = 120
+
+## Authentication method to use for user management
+[auth]
+#method = Fake|OpenID|OAuth2
+## whether authentication is required to access assets; caveats:
+## - Does NOT affect assets made available outside the scope of the openQA service (e.g. via Apache, NGINX or NFS).
+## - Might not work well with other features like openqa-clone-job and the cache service.
+#require_for_assets = 0
+
+## for GitHub, salsa.debian.org and providers listed on https://metacpan.org/pod/Mojolicious::Plugin::OAuth2#Configuration
+## one can use:
+#[oauth2]
+#provider = github|debian_salsa
+#key = ...
+#secret = ...
+
+## alternatively, one can specify parameters manually without relying on a magic provider name:
+#[oauth2]
+#provider = custom
+#unique_name = debian_salsa
+#key = ...
+#secret = ...
+#authorize_url = https://salsa.debian.org/oauth/authorize?response_type=code
+#token_url = https://salsa.debian.org/oauth/token
+#user_url = https://salsa.debian.org/api/v4/user
+#token_scope = read_user
+#token_label = Bearer
+#id_from = id
+#nickname_from = username
+#
+# The default value for `id_from` is "id" for backwards compatibility.
+# For OpenID Connect, the value should be "sub", following the OpenID Connect ID Token standard.
+
+[logging]
+## logging is to stderr (so systemd journal) by default
+## if you use a file, remember the apparmor profile!
+#file = /var/log/openqa
+#level = debug
+#sql_debug = true
+
+## Configuration for OpenID auth method
+[openid]
+## base url for openid provider
+#provider = https://www.opensuse.org/openid/user/
+## enforce redirect back to https
+#httpsonly = 1
+
+## Configuration for auditing plugin
+[audit]
+## disable auditing of chatty events by default
+blocklist = job_grab job_done
+
+## Sets the storage duration in days for the different audit event types
+[audit/storage_duration]
+## By default cleanup is disabled, see http://open.qa/docs/#_auditing_tracking_openqa_changes
+## The following categories with example values can be uncommented as needed
+#startup = 10
+#jobgroup = 365
+#jobtemplate = 365
+#table = 365
+#iso = 60
+#user = 60
+#asset = 30
+#needle = 30
+#other = 15
+
+## Configuration for AMQP plugin
+[amqp]
+#reconnect_timeout = 5
+#publish_attempts = 10
+#publish_retry_delay = 1
+#publish_retry_delay_factor = 1.75
+# guest/guest is the default anonymous user/pass for RabbitMQ
+#url = amqp://guest:guest@localhost:5672/
+#exchange = pubsub
+#topic_prefix = suse
+#cacertfile =
+#certfile =
+#keyfile =
+
+## Cleanup related configuration (besides limits)
+[cleanup]
+## Specifies whether different cleanup tasks can run in parallel. This option
+## is particularly useful if assets and results are stored on different storage
+## locations or if your common storage solution is performant enough.
+concurrent = 0
+
+## Limits for cleanup of jobs in groups with no limits configured explicitly (sizes are in GiB, durations in days, zero denotes infinity)
+[default_group_limits]
+#asset_size_limit is only used on job group level (parent groups have no default)
+#asset_size_limit = 100
+#log_storage_duration = 30
+#important_log_storage_duration = 120
+#result_storage_duration = 365
+#important_result_storage_duration = 0
+
+## Limits for cleanup of jobs outside of groups (sizes are in GiB, durations in days, zero denotes infinity)
+[no_group_limits]
+#log_storage_duration = 30
+#important_log_storage_duration = 120
+#result_storage_duration = 365
+#important_result_storage_duration = 0
+
+[minion_task_triggers]
+## Specify one or more task names (space-separated), by default these are not enabled.
+## Good candidates would be limit_assets or limit_results_and_logs.
+## This is analogous to triggering tasks via systemd timers using
+## openqa-enqueue-asset-cleanup or openqa-enqueue-result-cleanup except
+## it's triggered whenever a job is done rather than periodically.
+#on_job_done = limit_results_and_logs limit_assets
+
+[misc_limits]
+#untracked_assets_storage_duration = 14
+## Performs the cleanup of results/assets only if the free disk space on the relevant partition is below the specified percentage (and aborts early otherwise)
+#result_cleanup_max_free_percentage = 100
+#asset_cleanup_max_free_percentage = 100
+## Specify the screenshot ID range to query at once from the database (reduce to avoid big queries, increase to lower query overhead)
+#screenshot_cleanup_batch_size = 200000
+## Specify the number of screenshot ID ranges (with a size as configured by screenshot_cleanup_batch_size) to process in a single Minion
+## job (reduce to prevent Minion jobs from running very long and possibly being interrupted, increase to reduce the number of Minion jobs)
+#screenshot_cleanup_batches_per_minion_job = 450
+## Extends the job result cleanup to ensure the partition that results are stored on does not become too full
+## (still experimental, relies on df)
+#results_min_free_disk_space_percentage = 0
+## Duration to keep minion jobs in the database, in seconds
+#minion_job_max_age = 604800
+## Default number of templates to include in job_templates api response (to prevent performance issues)
+#generic_default_limit = 10000
+## Maximum number of templates to include in job_templates api response (to prevent performance issues)
+#generic_max_limit = 100000
+## Maximum number of jobs to include in tests overview page (to prevent performance issues)
+#tests_overview_max_jobs = 2000
+## Default number of jobs to include in table of finished jobs on "All tests" page
+#all_tests_default_finished_jobs = 500
+## Maximum number of jobs to include in table of finished jobs on "All tests" page
+#all_tests_max_finished_jobs = 10000
+## Default number of templates to include in job_templates api response (to prevent performance issues)
+#list_templates_default_limit = 5000
+## Maximum number of templates to include in job_templates api response (to prevent performance issues)
+#list_templates_max_limit = 20000
+## Default number of next jobs to include in jobs request (to prevent performance issues)
+#next_jobs_default_limit = 500
+## Maximum number of next jobs to include in jobs request (to prevent performance issues)
+#next_jobs_max_limit = 10000
+## Default number of previous jobs to include in jobs request (to prevent performance issues)
+#previous_jobs_default_limit = 500
+## Maximum number of previous jobs to include in jobs request (to prevent performance issues)
+#previous_jobs_max_limit = 10000
+## Maximum number of recent jobs to consider when returning jobs for a settings query (to prevent performance issues)
+#job_settings_max_recent_jobs = 20000
+## Default number of assets to include in asset listing request (to prevent performance issues)
+#assets_default_limit = 100000
+## Maximum number of assets to include in asset listing request (to prevent performance issues)
+#assets_max_limit = 200000
+## Maximum number of online workers (to prevent performance issues)
+## Set to a low enough number if the openQA scheduler does not assign jobs
+## anymore or if openqa-scheduler reports errors about unresponsive
+## websockets server
+#max_online_workers = 1000
+## Retry delay for limited workers in seconds
+#worker_limit_retry_delay = 900
+## Maximum number of retries with exponential back-off for GruJobs to wait for a GruTask to appear in the database
+#wait_for_grutask_retries = 6
+
+[archiving]
+## Moves logs of jobs which are preserved during the cleanup because they are considered important
+## to "${OPENQA_ARCHIVEDIR:-${OPENQA_BASEDIR:-/var/lib}/openqa/archive}/testresults"
+#archive_preserved_important_jobs = 0
+
+[job_settings_ui]
+## Specify the keys of job settings which reference a file and should therefore be rendered
+## as links to those files within the job settings tab.
+## Directories should be under the `CASEDIR` root path or under the `data` folder of the `CASEDIR`. The `data`
+## folder is used as the default but can be configured to cover the needs of any test distribution. To change it, add the
+## `default_data_dir` variable with the name of the directory.
+#keys_to_render_as_links=YAML_SCHEDULE,AUTOYAST
+
+[hooks]
+## Specify a custom hook script to be executed when a job *with*
+## `_TRIGGER_JOB_DONE_HOOK=1` is done. The hook can also be enabled for jobs
+## *without* `_TRIGGER_JOB_DONE_HOOK=0` that are done with a certain result (see
+## further settings).
+#job_done_hook = my-openqa-hook
+
+## Specify that `job_done_hook` should be executed when a job is done with a
+## certain result (unless the job setting `_TRIGGER_JOB_DONE_HOOK=0` is present).
+## Note that `failed` in the following example can be replaced with any job
+## result.
+#job_done_hook_enable_failed = 1
+
+## Specify a custom hook script in the format `job_done_hook_$result` to be
+## called when a job is done with a certain result (unless the job setting
+## `_TRIGGER_JOB_DONE_HOOK=0` is present). Any executable specified in the
+## variable as an absolute path or as an executable name in `$PATH` is called with the job
+## ID as its first and only parameter, corresponding to the `$result`, for example:
+#job_done_hook_failed = my-openqa-hook-failed
+
+## Configuration for InfluxDB routes
+[influxdb]
+## Specify Minion task names which should never be counted towards the total number of failed Minion jobs.
+#ignored_failed_minion_jobs = influxdb-minion-fail-job-task-name
+
+## Configuration for scheduler
+[scheduler]
+## Specify how many days a job can stay in 'SCHEDULED' state, defaults to 7 days
+#max_job_scheduled_time = 7
+## Specify how many jobs in total can run per webui instance. Defaults to -1 (no limit)
+#max_running_jobs = -1
+
+## Configuration of the label/bugref carry-over
+[carry_over]
+## The carry-over goes through the list of previous jobs to search for comments to carry over.
+## Specify at which depth this search will be aborted:
+#lookup_depth = 10
+## Specify after how many state changes the search will be aborted (state = combination of failed/softfailed/skipped modules):
+#state_changes_limit = 3
+
+## Configuration for the OBS rsync plugin
+[obs_rsync]
+#project_status_url = %obs_instance%/build/%%PROJECT/_result;
+#concurrency = 2
+#queue_limit = 200
+#retry_interval = 60
+#retry_max_count = 2
+#home =
+#username = openqa-user
+#ssh_key_file = ~/.ssh/id_rsa
+
+## Override form values for creating example test
+#[test_preset example]
+#title = Create example test
+#info = Some info that will show up on the "Create … -> Example test" page
+#casedir = https://github.com/os-autoinst/os-autoinst-distri-example.git
+#distri = example
+#build = openqa
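
For illustration only (not part of the backup file added by the diff above), a minimal sketch of how a few of the documented options could be enabled in /etc/openqa/openqa.ini; all values are taken from the file's own example comments (the download domains, the limit_* Minion task names and the my-openqa-hook-failed script name are the examples from the comments, not built-in defaults):

[global]
# allow _URL asset downloads from these domains (disabled when unset)
download_domains = fedoraproject.org opensuse.org

[minion_task_triggers]
# run the cleanup tasks whenever a job finishes, in addition to or instead of the systemd timers
on_job_done = limit_results_and_logs limit_assets

[hooks]
# my-openqa-hook-failed is called with the failed job's ID as its only argument
job_done_hook_failed = my-openqa-hook-failed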