From 5c6f5ebdd849813253cd3eacd1f619e982077dff Mon Sep 17 00:00:00 2001 From: sebastianrosenzweig Date: Fri, 7 Mar 2025 11:40:33 +0100 Subject: [PATCH 01/27] paper initial commit --- paper/paper.bib | 59 ++++++++++++++++++++++++ paper/paper.md | 120 ++++++++++++++++++++++++++++++++++++++++++++++++ 2 files changed, 179 insertions(+) create mode 100644 paper/paper.bib create mode 100644 paper/paper.md diff --git a/paper/paper.bib b/paper/paper.bib new file mode 100644 index 0000000..4e4544a --- /dev/null +++ b/paper/paper.bib @@ -0,0 +1,59 @@ +@article{Pearson:2017, + url = {http://adsabs.harvard.edu/abs/2017arXiv170304627P}, + Archiveprefix = {arXiv}, + Author = {{Pearson}, S. and {Price-Whelan}, A.~M. and {Johnston}, K.~V.}, + Eprint = {1703.04627}, + Journal = {ArXiv e-prints}, + Keywords = {Astrophysics - Astrophysics of Galaxies}, + Month = mar, + Title = {{Gaps in Globular Cluster Streams: Pal 5 and the Galactic Bar}}, + Year = 2017 +} + +@book{Binney:2008, + url = {http://adsabs.harvard.edu/abs/2008gady.book.....B}, + Author = {{Binney}, J. and {Tremaine}, S.}, + Booktitle = {Galactic Dynamics: Second Edition, by James Binney and Scott Tremaine.~ISBN 978-0-691-13026-2 (HB).~Published by Princeton University Press, Princeton, NJ USA, 2008.}, + Publisher = {Princeton University Press}, + Title = {{Galactic Dynamics: Second Edition}}, + Year = 2008 +} + +@article{gaia, + author = {{Gaia Collaboration}}, + title = "{The Gaia mission}", + journal = {Astronomy and Astrophysics}, + archivePrefix = "arXiv", + eprint = {1609.04153}, + primaryClass = "astro-ph.IM", + keywords = {space vehicles: instruments, Galaxy: structure, astrometry, parallaxes, proper motions, telescopes}, + year = 2016, + month = nov, + volume = 595, + doi = {10.1051/0004-6361/201629272}, + url = {http://adsabs.harvard.edu/abs/2016A%26A...595A...1G}, +} + +@article{astropy, + author = {{Astropy Collaboration}}, + title = "{Astropy: A community Python package for astronomy}", + journal = {Astronomy and Astrophysics}, + archivePrefix = "arXiv", + eprint = {1307.6212}, + primaryClass = "astro-ph.IM", + keywords = {methods: data analysis, methods: miscellaneous, virtual observatory tools}, + year = 2013, + month = oct, + volume = 558, + doi = {10.1051/0004-6361/201322068}, + url = {http://adsabs.harvard.edu/abs/2013A%26A...558A..33A} +} + +@misc{fidgit, + author = {A. M. Smith and K. Thaney and M. Hahnel}, + title = {Fidgit: An ungodly union of GitHub and Figshare}, + year = {2020}, + publisher = {GitHub}, + journal = {GitHub repository}, + url = {https://github.com/arfon/fidgit} +} diff --git a/paper/paper.md b/paper/paper.md new file mode 100644 index 0000000..91e17f4 --- /dev/null +++ b/paper/paper.md @@ -0,0 +1,120 @@ +--- +title: 'Gala: A Python package for galactic dynamics' +tags: + - Python + - astronomy + - dynamics + - galactic dynamics + - milky way +authors: + - name: Adrian M. Price-Whelan + orcid: 0000-0000-0000-0000 + equal-contrib: true + affiliation: "1, 2" # (Multiple affiliations must be quoted) + - name: Author Without ORCID + equal-contrib: true # (This is how you can denote equal contributions between multiple authors) + affiliation: 2 + - name: Author with no affiliation + corresponding: true # (This is how to denote the corresponding author) + affiliation: 3 + - given-names: Ludwig + dropping-particle: van + surname: Beethoven + affiliation: 3 +affiliations: + - name: Lyman Spitzer, Jr. 
Fellow, Princeton University, United States + index: 1 + ror: 00hx57361 + - name: Institution Name, Country + index: 2 + - name: Independent Researcher, Country + index: 3 +date: 13 August 2017 +bibliography: paper.bib + +# Optional fields if submitting to a AAS journal too, see this blog post: +# https://blog.joss.theoj.org/2018/12/a-new-collaboration-with-aas-publishing +aas-doi: 10.3847/xxxxx <- update this with the DOI from AAS once you know it. +aas-journal: Astrophysical Journal <- The name of the AAS journal. +--- + +# Summary + +The forces on stars, galaxies, and dark matter under external gravitational +fields lead to the dynamical evolution of structures in the universe. The orbits +of these bodies are therefore key to understanding the formation, history, and +future state of galaxies. The field of "galactic dynamics," which aims to model +the gravitating components of galaxies to study their structure and evolution, +is now well-established, commonly taught, and frequently used in astronomy. +Aside from toy problems and demonstrations, the majority of problems require +efficient numerical tools, many of which require the same base code (e.g., for +performing numerical orbit integration). + +# Statement of need + +`Gala` is an Astropy-affiliated Python package for galactic dynamics. Python +enables wrapping low-level languages (e.g., C) for speed without losing +flexibility or ease-of-use in the user-interface. The API for `Gala` was +designed to provide a class-based and user-friendly interface to fast (C or +Cython-optimized) implementations of common operations such as gravitational +potential and force evaluation, orbit integration, dynamical transformations, +and chaos indicators for nonlinear dynamics. `Gala` also relies heavily on and +interfaces well with the implementations of physical units and astronomical +coordinate systems in the `Astropy` package [@astropy] (`astropy.units` and +`astropy.coordinates`). + +`Gala` was designed to be used by both astronomical researchers and by +students in courses on gravitational dynamics or astronomy. It has already been +used in a number of scientific publications [@Pearson:2017] and has also been +used in graduate courses on Galactic dynamics to, e.g., provide interactive +visualizations of textbook material [@Binney:2008]. The combination of speed, +design, and support for Astropy functionality in `Gala` will enable exciting +scientific explorations of forthcoming data releases from the *Gaia* mission +[@gaia] by students and experts alike. + +# Mathematics + +Single dollars ($) are required for inline mathematics e.g. $f(x) = e^{\pi/x}$ + +Double dollars make self-standing equations: + +$$\Theta(x) = \left\{\begin{array}{l} +0\textrm{ if } x < 0\cr +1\textrm{ else} +\end{array}\right.$$ + +You can also use plain \LaTeX for equations +\begin{equation}\label{eq:fourier} +\hat f(\omega) = \int_{-\infty}^{\infty} f(x) e^{i\omega x} dx +\end{equation} +and refer to \autoref{eq:fourier} from text. + +# Citations + +Citations to entries in paper.bib should be in +[rMarkdown](http://rmarkdown.rstudio.com/authoring_bibliographies_and_citations.html) +format. + +If you want to cite a software repository URL (e.g. something on GitHub without a preferred +citation) then you can do it with the example BibTeX entry below for @fidgit. + +For a quick reference, the following citation commands can be used: +- `@author:2001` -> "Author et al. 
(2001)" +- `[@author:2001]` -> "(Author et al., 2001)" +- `[@author1:2001; @author2:2001]` -> "(Author1 et al., 2001; Author2 et al., 2002)" + +# Figures + +Figures can be included like this: +![Caption for example figure.\label{fig:example}](figure.png) +and referenced from text using \autoref{fig:example}. + +Figure sizes can be customized by adding an optional second parameter: +![Caption for example figure.](figure.png){ width=20% } + +# Acknowledgements + +We acknowledge contributions from Brigitta Sipocz, Syrtis Major, and Semyeong +Oh, and support from Kathryn Johnston during the genesis of this project. + +# References From 2b6a537241671a50ab0d2bd43abd5eda89d51eb4 Mon Sep 17 00:00:00 2001 From: sebastianrosenzweig Date: Fri, 7 Mar 2025 11:43:55 +0100 Subject: [PATCH 02/27] commit hook --- .github/workflows/draft-pdf.yml | 24 ++++++++++++++++++++++++ 1 file changed, 24 insertions(+) create mode 100644 .github/workflows/draft-pdf.yml diff --git a/.github/workflows/draft-pdf.yml b/.github/workflows/draft-pdf.yml new file mode 100644 index 0000000..b7053c0 --- /dev/null +++ b/.github/workflows/draft-pdf.yml @@ -0,0 +1,24 @@ +name: Draft PDF +on: [push] + +jobs: + paper: + runs-on: ubuntu-latest + name: Paper Draft + steps: + - name: Checkout + uses: actions/checkout@v4 + - name: Build draft PDF + uses: openjournals/openjournals-draft-action@master + with: + journal: joss + # This should be the path to the paper within your repo. + paper-path: paper/paper.md + - name: Upload + uses: actions/upload-artifact@v4 + with: + name: paper + # This is the output path where Pandoc will write the compiled + # PDF. Note, this should be the same directory as the input + # paper.md + path: paper/paper.pdf From 71cc855636eb445bd0dce775190de15f131170e3 Mon Sep 17 00:00:00 2001 From: sebastianrosenzweig Date: Fri, 7 Mar 2025 12:17:25 +0100 Subject: [PATCH 03/27] first steps --- paper/paper.md | 142 +++++++++++++++++-------------------------------- 1 file changed, 48 insertions(+), 94 deletions(-) diff --git a/paper/paper.md b/paper/paper.md index 91e17f4..f4ba7ae 100644 --- a/paper/paper.md +++ b/paper/paper.md @@ -1,120 +1,74 @@ --- -title: 'Gala: A Python package for galactic dynamics' +title: '`pytch`: A Real-Time Pitch Analysis Tool For Polyphonic Music' tags: - Python - - astronomy - - dynamics - - galactic dynamics - - milky way + - Audio Processing + - Music Information Retrieval + - Singing Voice Analysis + - GUI authors: - - name: Adrian M. 
Price-Whelan - orcid: 0000-0000-0000-0000 - equal-contrib: true - affiliation: "1, 2" # (Multiple affiliations must be quoted) - - name: Author Without ORCID - equal-contrib: true # (This is how you can denote equal contributions between multiple authors) - affiliation: 2 - - name: Author with no affiliation - corresponding: true # (This is how to denote the corresponding author) - affiliation: 3 - - given-names: Ludwig - dropping-particle: van - surname: Beethoven + - name: Sebastian Rosenzweig + orcid: 0000-0003-4964-9217 + corresponding: true + affiliation: 1 + - name: Marius Kriegerowski + orcid: + corresponding: false affiliation: 3 + - name: Lukas Dietz + orcid: + corresponding: false + affiliation: 5 + - name: Peter Meier + orcid: 0000-0002-3094-1931 + corresponding: false + affiliation: 2 + - name: Sebastian Strahl + orcid: 0009-0007-9654-7762 + corresponding: false + affiliation: 2 + - name: Frank Scherbaum + orcid: 0000-0002-5050-7331 + corresponding: false + affiliation: 2 + - name: Meinard Müller + orcid: 0000-0001-6062-7524 + corresponding: false + affiliation: 2 + + affiliations: - - name: Lyman Spitzer, Jr. Fellow, Princeton University, United States + - name: Audoo Ltd., London, UK index: 1 - ror: 00hx57361 - - name: Institution Name, Country + - name: International Audio Laboratories Erlangen, Germany index: 2 - - name: Independent Researcher, Country + - name: University of Potsdam, Germany index: 3 -date: 13 August 2017 + - name: Independent Researcher, Berlin, Germany + index: 4 + - name: Tantive GmbH, Nürnberg Germany + index: 5 +date: 7 March 2025 bibliography: paper.bib - -# Optional fields if submitting to a AAS journal too, see this blog post: -# https://blog.joss.theoj.org/2018/12/a-new-collaboration-with-aas-publishing -aas-doi: 10.3847/xxxxx <- update this with the DOI from AAS once you know it. -aas-journal: Astrophysical Journal <- The name of the AAS journal. --- # Summary -The forces on stars, galaxies, and dark matter under external gravitational -fields lead to the dynamical evolution of structures in the universe. The orbits -of these bodies are therefore key to understanding the formation, history, and -future state of galaxies. The field of "galactic dynamics," which aims to model -the gravitating components of galaxies to study their structure and evolution, -is now well-established, commonly taught, and frequently used in astronomy. -Aside from toy problems and demonstrations, the majority of problems require -efficient numerical tools, many of which require the same base code (e.g., for -performing numerical orbit integration). - -# Statement of need +# Graphical User Interface (GUI) +![`pytch` GUI.\label{fig:GUI}](pictures/screenshot.png){ width=90% } -`Gala` is an Astropy-affiliated Python package for galactic dynamics. Python -enables wrapping low-level languages (e.g., C) for speed without losing -flexibility or ease-of-use in the user-interface. The API for `Gala` was -designed to provide a class-based and user-friendly interface to fast (C or -Cython-optimized) implementations of common operations such as gravitational -potential and force evaluation, orbit integration, dynamical transformations, -and chaos indicators for nonlinear dynamics. `Gala` also relies heavily on and -interfaces well with the implementations of physical units and astronomical -coordinate systems in the `Astropy` package [@astropy] (`astropy.units` and -`astropy.coordinates`). +A screenshot of the main GUI is shown in Figure \autoref{fig:GUI}. 
-`Gala` was designed to be used by both astronomical researchers and by -students in courses on gravitational dynamics or astronomy. It has already been -used in a number of scientific publications [@Pearson:2017] and has also been -used in graduate courses on Galactic dynamics to, e.g., provide interactive -visualizations of textbook material [@Binney:2008]. The combination of speed, -design, and support for Astropy functionality in `Gala` will enable exciting -scientific explorations of forthcoming data releases from the *Gaia* mission -[@gaia] by students and experts alike. +# Audio Processing -# Mathematics +# Statement of Need -Single dollars ($) are required for inline mathematics e.g. $f(x) = e^{\pi/x}$ - -Double dollars make self-standing equations: - -$$\Theta(x) = \left\{\begin{array}{l} -0\textrm{ if } x < 0\cr -1\textrm{ else} -\end{array}\right.$$ - -You can also use plain \LaTeX for equations -\begin{equation}\label{eq:fourier} -\hat f(\omega) = \int_{-\infty}^{\infty} f(x) e^{i\omega x} dx -\end{equation} -and refer to \autoref{eq:fourier} from text. - -# Citations - -Citations to entries in paper.bib should be in -[rMarkdown](http://rmarkdown.rstudio.com/authoring_bibliographies_and_citations.html) -format. - -If you want to cite a software repository URL (e.g. something on GitHub without a preferred -citation) then you can do it with the example BibTeX entry below for @fidgit. - -For a quick reference, the following citation commands can be used: - `@author:2001` -> "Author et al. (2001)" - `[@author:2001]` -> "(Author et al., 2001)" - `[@author1:2001; @author2:2001]` -> "(Author1 et al., 2001; Author2 et al., 2002)" -# Figures - -Figures can be included like this: -![Caption for example figure.\label{fig:example}](figure.png) -and referenced from text using \autoref{fig:example}. - -Figure sizes can be customized by adding an optional second parameter: -![Caption for example figure.](figure.png){ width=20% } - # Acknowledgements -We acknowledge contributions from Brigitta Sipocz, Syrtis Major, and Semyeong -Oh, and support from Kathryn Johnston during the genesis of this project. +We would like to thank all singers that helped developing this tool. The International Audio Laboratories Erlangen are a joint institution of the Friedrich-Alexander-Universität Erlangen-Nürnberg (FAU) and Fraunhofer Institute for Integrated Circuits IIS. # References From 54ef74bdbe0b0b6c502679b4c6b576c1542718c9 Mon Sep 17 00:00:00 2001 From: sebastianrosenzweig Date: Fri, 7 Mar 2025 12:25:27 +0100 Subject: [PATCH 04/27] fixes --- paper/paper.md | 8 +++----- 1 file changed, 3 insertions(+), 5 deletions(-) diff --git a/paper/paper.md b/paper/paper.md index f4ba7ae..7cd17a9 100644 --- a/paper/paper.md +++ b/paper/paper.md @@ -46,7 +46,7 @@ affiliations: index: 3 - name: Independent Researcher, Berlin, Germany index: 4 - - name: Tantive GmbH, Nürnberg Germany + - name: Tantive GmbH, Nürnberg, Germany index: 5 date: 7 March 2025 bibliography: paper.bib @@ -55,7 +55,7 @@ bibliography: paper.bib # Summary # Graphical User Interface (GUI) -![`pytch` GUI.\label{fig:GUI}](pictures/screenshot.png){ width=90% } +![`pytch` GUI.\label{fig:GUI}](../pictures/screenshot.png){ width=90% } A screenshot of the main GUI is shown in Figure \autoref{fig:GUI}. @@ -63,9 +63,7 @@ A screenshot of the main GUI is shown in Figure \autoref{fig:GUI}. # Statement of Need -- `@author:2001` -> "Author et al. 
(2001)" -- `[@author:2001]` -> "(Author et al., 2001)" -- `[@author1:2001; @author2:2001]` -> "(Author1 et al., 2001; Author2 et al., 2002)" +[@Binney:2008] # Acknowledgements From 65471ad6ea029d3e064ad6b4b2f407f324d2ac88 Mon Sep 17 00:00:00 2001 From: sebastianrosenzweig Date: Fri, 14 Mar 2025 12:41:05 +0100 Subject: [PATCH 05/27] new authors --- paper/compile.sh | 4 ++++ paper/paper.md | 10 +--------- 2 files changed, 5 insertions(+), 9 deletions(-) create mode 100644 paper/compile.sh diff --git a/paper/compile.sh b/paper/compile.sh new file mode 100644 index 0000000..7263c37 --- /dev/null +++ b/paper/compile.sh @@ -0,0 +1,4 @@ +#!/bin/bash + +cd .. +docker run --rm -it -v "$PWD:/data" -u "$(id -u):$(id -g)" openjournals/inara -o pdf paper/paper.md -p diff --git a/paper/paper.md b/paper/paper.md index 7cd17a9..b1fdc7d 100644 --- a/paper/paper.md +++ b/paper/paper.md @@ -1,5 +1,5 @@ --- -title: '`pytch`: A Real-Time Pitch Analysis Tool For Polyphonic Music' +title: '`pytch`: A Real-Time Pitch Analysis Tool For Polyphonic Vocal Music' tags: - Python - Audio Processing @@ -19,14 +19,6 @@ authors: orcid: corresponding: false affiliation: 5 - - name: Peter Meier - orcid: 0000-0002-3094-1931 - corresponding: false - affiliation: 2 - - name: Sebastian Strahl - orcid: 0009-0007-9654-7762 - corresponding: false - affiliation: 2 - name: Frank Scherbaum orcid: 0000-0002-5050-7331 corresponding: false From f8d716fa21cfb8ecb20e511e1873374f069a11a6 Mon Sep 17 00:00:00 2001 From: sebastianrosenzweig Date: Fri, 14 Mar 2025 20:34:02 +0100 Subject: [PATCH 06/27] first steps --- LICENSE | 2 +- paper/paper.bib | 247 ++++++++++++++++++++++++++++++++++++------------ paper/paper.md | 35 ++++--- 3 files changed, 213 insertions(+), 71 deletions(-) diff --git a/LICENSE b/LICENSE index 03a5c24..5178239 100644 --- a/LICENSE +++ b/LICENSE @@ -1,6 +1,6 @@ MIT License -Copyright (c) 2016--2024 pytch development team. +Copyright (c) 2016--2025 pytch development team. Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal diff --git a/paper/paper.bib b/paper/paper.bib index 4e4544a..aa4a1f2 100644 --- a/paper/paper.bib +++ b/paper/paper.bib @@ -1,59 +1,190 @@ -@article{Pearson:2017, - url = {http://adsabs.harvard.edu/abs/2017arXiv170304627P}, - Archiveprefix = {arXiv}, - Author = {{Pearson}, S. and {Price-Whelan}, A.~M. and {Johnston}, K.~V.}, - Eprint = {1703.04627}, - Journal = {ArXiv e-prints}, - Keywords = {Astrophysics - Astrophysics of Galaxies}, - Month = mar, - Title = {{Gaps in Globular Cluster Streams: Pal 5 and the Galactic Bar}}, - Year = 2017 -} - -@book{Binney:2008, - url = {http://adsabs.harvard.edu/abs/2008gady.book.....B}, - Author = {{Binney}, J. 
and {Tremaine}, S.}, - Booktitle = {Galactic Dynamics: Second Edition, by James Binney and Scott Tremaine.~ISBN 978-0-691-13026-2 (HB).~Published by Princeton University Press, Princeton, NJ USA, 2008.}, - Publisher = {Princeton University Press}, - Title = {{Galactic Dynamics: Second Edition}}, - Year = 2008 -} - -@article{gaia, - author = {{Gaia Collaboration}}, - title = "{The Gaia mission}", - journal = {Astronomy and Astrophysics}, - archivePrefix = "arXiv", - eprint = {1609.04153}, - primaryClass = "astro-ph.IM", - keywords = {space vehicles: instruments, Galaxy: structure, astrometry, parallaxes, proper motions, telescopes}, - year = 2016, - month = nov, - volume = 595, - doi = {10.1051/0004-6361/201629272}, - url = {http://adsabs.harvard.edu/abs/2016A%26A...595A...1G}, -} - -@article{astropy, - author = {{Astropy Collaboration}}, - title = "{Astropy: A community Python package for astronomy}", - journal = {Astronomy and Astrophysics}, - archivePrefix = "arXiv", - eprint = {1307.6212}, - primaryClass = "astro-ph.IM", - keywords = {methods: data analysis, methods: miscellaneous, virtual observatory tools}, - year = 2013, - month = oct, - volume = 558, - doi = {10.1051/0004-6361/201322068}, - url = {http://adsabs.harvard.edu/abs/2013A%26A...558A..33A} -} - -@misc{fidgit, - author = {A. M. Smith and K. Thaney and M. Hahnel}, - title = {Fidgit: An ungodly union of GitHub and Figshare}, - year = {2020}, - publisher = {GitHub}, - journal = {GitHub repository}, - url = {https://github.com/arfon/fidgit} +@article{BerglinPD22_VisualFeedback_JPM, +author = {Jacob Berglin and Peter Q Pfordresher and Steven Demorest}, +title = {The effect of visual and auditory feedback on adult poor-pitch remediation}, +journal = {Psychology of Music}, +volume = {50}, +number = {4}, +pages = {1077-1090}, +year = {2022}, +doi = {10.1177/03057356211026730}, +URL = {https://doi.org/10.1177/03057356211026730}, +} + +@inproceedings{RosenzweigSM22_libf0_ISMIR-LBD, +author = {Sebastian Rosenzweig and Simon Schw{\"a}r and Meinard M{\"u}ller}, +title = {libf0: A Python Library for Fundamental Frequency Estimation}, +booktitle = {Late Breaking Demos of the International Society for Music Information Retrieval Conference ({ISMIR})}, +address = {Bengaluru, India}, +year = {2022}, +url-pdf = {https://archives.ismir.net/ismir2022/latebreaking/000003.pdf}, +url-code = {https://github.com/groupmm/libf0/} +} + +@article{Boersma01_Praat_GI, + author = {Paul Boersma}, + journal = {Glot International}, + number = {9/10}, + pages = {341--345}, + title = {{Praat}, a system for doing phonetics by computer}, + volume = {5}, + year = {2001} +} + +@inproceedings{CannamLS10_SonicVisualizer_ICMC, + author = {Chris Cannam and Christian Landone and Mark B. 
Sandler}, + title = {Sonic {V}isualiser: An Open Source Application for Viewing, Analysing, and Annotating Music Audio Files}, + booktitle = {Proceedings of the International Conference on Multimedia}, + address = {Florence, Italy}, + pages = {1467--1468}, + year = {2010}, +} + +@article{SixCL13_Tarsos_JNMR, + author = {Joren Six and Olmo Cornelis and Marc Leman}, + title = {Tarsos, a Modular Platform for Precise Pitch Analysis of {W}estern and Non-{W}estern Music}, + journal = {Journal of New Music Research}, + volume = {42}, + number = {2}, + pages = {113-129}, + year = {2013}, + doi = {10.1080/09298215.2013.797999} +} + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +@article{CheveigneK02_YIN_JASA, + author = {Alain de Cheveign{\'e} and Hideki Kawahara}, + title = {{YIN}, a fundamental frequency estimator for speech and music.}, + journal = {Journal of the Acoustical Society of America (JASA)}, + year = {2002}, + volume = {111}, + pages = {1917--1930}, + number = {4}, +} + +@inproceedings{MauchD14_pYIN_ICASSP, + author = {Matthias Mauch and Simon Dixon}, + title = {{pYIN}: A Fundamental Frequency Estimator Using Probabilistic Threshold Distributions}, + booktitle = {{IEEE} International Conference on Acoustics, Speech and Signal Processing ({ICASSP})}, + year = {2014}, + address = {Florence, Italy}, + pages = {659--663}, +} + +@article{SalamonG12_MelodyExtraction_TASLP, + Author = {Justin Salamon and Emilia G{\'o}mez}, + Title = {Melody Extraction from Polyphonic Music Signals using Pitch Contour Characteristics}, + Journal = {IEEE Transactions on Audio, Speech, and Language Processing}, + Number = {6}, + Volume = {20}, + Pages = {1759--1770}, + Year = {2012}, + doi = {10.1109/TASL.2012.2188515} +} + +@article{CamachoH08_SawtoothWaveform_JASA, + author = {Arturo Camacho and John G. Harris}, + title = {A sawtooth waveform inspired pitch estimator for speech and music}, + publisher = {ASA}, + year = {2008}, + journal = {The Journal of the Acoustical Society of America}, + volume = {124}, + number = {3}, + pages = {1638--1652}, +} + +@inproceedings{BittnerFRJCK19_mirdata_ISMIR, + author = {Rachel M. Bittner and Magdalena Fuentes and David Rubinstein and Andreas Jansson and Keunwoo Choi and Thor Kell}, + title = {{mirdata}: Software for Reproducible Usage of Datasets}, + booktitle = {Proceedings of the International Society for Music Information Retrieval Conference ({ISMIR})}, + pages = {99--106}, + year = {2019}, + address = {Delft, The Netherlands}, + url = {http://archives.ismir.net/ismir2019/paper/000009.pdf} +} + +@inproceedings{RaffelMHSNLE14_MirEval_ISMIR, + author = {Colin Raffel and Brian McFee and Eric J. Humphrey and Justin Salamon and Oriol Nieto and Dawen Liang and Daniel P. W. 
Ellis}, + title = {{MIR{\_}EVAL}: {A} Transparent Implementation of Common {MIR} Metrics}, + pages = {367--372}, + booktitle = {Proceedings of the International Society for Music Information Retrieval Conference ({ISMIR})}, + address = {Taipei, Taiwan}, + year = {2014}, +} + +@article{RosenzweigCWSGM20_DCS_TISMIR, + author = {Sebastian Rosenzweig and Helena Cuesta and Christof Wei{\ss} and Frank Scherbaum and Emilia G{\'o}mez and Meinard M{\"u}ller}, + title = {{D}agstuhl {ChoirSet}: {A} Multitrack Dataset for {MIR} Research on Choral Singing}, + journal = {Transactions of the International Society for Music Information Retrieval ({TISMIR})}, + volume = {3}, + number = {1}, + year = {2020}, + pages = {98--110}, + publisher = {Ubiquity Press}, + doi = {10.5334/tismir.48}, + url-pdf = {2020_RosenzweigCWSGM_DagstuhlChoirSet_TISMIR_ePrint.pdf}, + url-demo = {https://www.audiolabs-erlangen.de/resources/MIR/2020-DagstuhlChoirSet} +} + +@inproceedings{BittnerSBB17_PitchContours_AES, + author = {Rachel M. Bittner and Justin Salamon and Juan J. Bosch and Juan Pablo Bello}, + title = {Pitch Contours as a Mid-Level Representation for Music Informatics}, + booktitle = {Proceedings of the {AES} International Conference on Semantic Audio}, + address = {Erlangen, Germany}, + pages = {100--107}, + year = {2017}, + url = {http://www.aes.org/e-lib/browse.cfm?elib=18756} +} + +@inproceedings{RosenzweigSM21_F0Reliability_ICASSP, + author = {Sebastian Rosenzweig and Frank Scherbaum and Meinard M{\"u}ller}, + title = {Reliability Assessment of Singing Voice {F0}-Estimates Using Multiple Algorithms}, + booktitle = {Proceedings of the {IEEE} International Conference on Acoustics, Speech, and Signal Processing ({ICASSP})}, + pages = {261--265}, + address = {Toronto, Canada}, + year = {2021}, + doi = {10.1109/ICASSP39728.2021.9413372} +} + +@book{Mueller21_FMP_SPRINGER, + author = {Meinard M\"{u}ller}, + title = {Fundamentals of Music Processing -- Using Python and Jupyter Notebooks}, + type = {Monograph}, + year = {2021}, + isbn = {978-3-030-69807-2}, + publisher = {Springer Verlag}, + edition = {2nd}, + pages = {1--495}, + doi = {10.1007/978-3-030-69808-9}, + url-details = {http://www.music-processing.de} } diff --git a/paper/paper.md b/paper/paper.md index b1fdc7d..cf502d3 100644 --- a/paper/paper.md +++ b/paper/paper.md @@ -1,5 +1,5 @@ --- -title: '`pytch`: A Real-Time Pitch Analysis Tool For Polyphonic Vocal Music' +title: '`pytch`: A Real-Time Analysis Tool For Polyphonic Singing' tags: - Python - Audio Processing @@ -10,19 +10,19 @@ authors: - name: Sebastian Rosenzweig orcid: 0000-0003-4964-9217 corresponding: true - affiliation: 1 + affiliation: "1,2" - name: Marius Kriegerowski orcid: corresponding: false - affiliation: 3 + affiliation: 4 - name: Lukas Dietz orcid: corresponding: false - affiliation: 5 + affiliation: "1,5" - name: Frank Scherbaum orcid: 0000-0002-5050-7331 corresponding: false - affiliation: 2 + affiliation: 3 - name: Meinard Müller orcid: 0000-0001-6062-7524 corresponding: false @@ -30,11 +30,11 @@ authors: affiliations: - - name: Audoo Ltd., London, UK + - name: Audoo Ltd., London, United Kingdom index: 1 - - name: International Audio Laboratories Erlangen, Germany + - name: International Audio Laboratories Erlangen, Erlangen, Germany index: 2 - - name: University of Potsdam, Germany + - name: University of Potsdam, Potsdam, Germany index: 3 - name: Independent Researcher, Berlin, Germany index: 4 @@ -45,20 +45,31 @@ bibliography: paper.bib --- # Summary +Polyphonic singing is one of 
the most widespread forms of music-making. During a vocal performance, singers must constantly adjust their pitch to stay in tune with one another — a complex skill that often requires extensive practice and guidance from a conductor or experienced lead singer. Recent research suggests that a singer can improve their tuning during rehearsals when given visual feedback in real time [@BerglinPD22_VisualFeedback_JPM], e.g., through watching a visualization of the fundamental frequency (F0) trajectory which corresponds to the pitch progression of the singing voice. In the context of polyphonic singing, real-time analysis of all voices together is essential to assess the complex interactions and provide meaningful feedback. To this end, we developed `pytch`, an interactive Python tool with a graphical user interface (GUI) designed to record and analyze multiple voices in real time through multichannel processing. The tool displays vocal spectra and estimated F0-trajectories for all singers, as well as the harmonic intervals between them. Furthermore, the user can interactively tune visual and algorithmic parameters to adapt to different input devices, microphone signals, singing styles, and use cases. Written in Python, `pytch` utilizes the `libf0` library [@RosenzweigSM22_libf0_ISMIR-LBD] for F0-estimation and the `pyqtgraph` library [^1] to visualize the analysis results. + +[^1]: + +# Statement of Need +Various tools for singing analysis exist, ranging from open-source research platforms like Praat [@Boersma01_Praat_GI], Sonic Visualiser [@CannamLS10_SonicVisualizer_ICMC], and Tarsos [@SixCL13_Tarsos_JNMR], to commercial applications such as Singstar[^2] or Singing Carrots [^3]. However, these tools face several limitations when applied to polyphonic singing. Most notably, many tools can only process a single voice at a time, which is problematic for analyzing the interactions between voices in a group performance. Additionally, real-time feedback is often missing, preventing an effective feedback loop between singers and their tool. Furthermore, tools that rely on a score as a reference pose challenges for a cappella performances, where singers may drift together in pitch over time while maintaining relative harmony, or in orally-transmitted traditions that may lack a formal score altogether. To address these challenges, we developed `pytch`. Unlike existing tools, `pytch` enables the analysis of multiple voices simultaneously in real time without classifying singing as simply “correct” or “incorrect.” Instead, it serves as an objective, score-independent measurement tool for singers and conductors to assess and improve their collective tuning. Additionally, `pytch` offers a platform for developing and testing real-time audio processing algorithms, such as for F0-estimation. + +[^2]: +[^3]: + +# Audio Processing +* +YIN [@CheveigneK02_YIN_JASA] # Graphical User Interface (GUI) ![`pytch` GUI.\label{fig:GUI}](../pictures/screenshot.png){ width=90% } A screenshot of the main GUI is shown in Figure \autoref{fig:GUI}. -# Audio Processing +While there is no hard limit on the number of channels, we recommend to use up to four input channels to ensure visibility of the charts and responsiveness of the GUI. -# Statement of Need -[@Binney:2008] # Acknowledgements - +**DFG? Uni Potsdam?** We would like to thank all singers that helped developing this tool. 
The International Audio Laboratories Erlangen are a joint institution of the Friedrich-Alexander-Universität Erlangen-Nürnberg (FAU) and Fraunhofer Institute for Integrated Circuits IIS. # References From a4db85ec57ed798761a9cdc2a9609385731cb463 Mon Sep 17 00:00:00 2001 From: sebastianrosenzweig Date: Sun, 16 Mar 2025 18:26:31 +0100 Subject: [PATCH 07/27] new intro --- paper/paper.bib | 10 ++++++++++ paper/paper.md | 23 ++++++++++++++--------- 2 files changed, 24 insertions(+), 9 deletions(-) diff --git a/paper/paper.bib b/paper/paper.bib index aa4a1f2..66e2706 100644 --- a/paper/paper.bib +++ b/paper/paper.bib @@ -10,6 +10,16 @@ @article{BerglinPD22_VisualFeedback_JPM URL = {https://doi.org/10.1177/03057356211026730}, } +@inproceedings{MeierSM25_RealTimeF0_ISMIR, +author = {Peter Meier and Sebastian Strahl and Simon Schw{\"a}r and Meinard M{\"u}ller}, +title = {libf0-realtime: TODO}, +booktitle = {Submitted to the International Society for Music Information Retrieval Conference ({ISMIR})}, +address = {}, +year = {2025}, +url-pdf = {}, +url-code = {} +} + @inproceedings{RosenzweigSM22_libf0_ISMIR-LBD, author = {Sebastian Rosenzweig and Simon Schw{\"a}r and Meinard M{\"u}ller}, title = {libf0: A Python Library for Fundamental Frequency Estimation}, diff --git a/paper/paper.md b/paper/paper.md index cf502d3..2c6f7be 100644 --- a/paper/paper.md +++ b/paper/paper.md @@ -1,5 +1,5 @@ --- -title: '`pytch`: A Real-Time Analysis Tool For Polyphonic Singing' +title: '`pytch`: A Real-Time Monitoring Tool For Polyphonic Singing Performances' tags: - Python - Audio Processing @@ -18,15 +18,15 @@ authors: - name: Lukas Dietz orcid: corresponding: false - affiliation: "1,5" - - name: Frank Scherbaum - orcid: 0000-0002-5050-7331 - corresponding: false - affiliation: 3 + affiliation: 2 - name: Meinard Müller orcid: 0000-0001-6062-7524 corresponding: false affiliation: 2 + - name: Frank Scherbaum + orcid: 0000-0002-5050-7331 + corresponding: false + affiliation: 3 affiliations: @@ -45,12 +45,17 @@ bibliography: paper.bib --- # Summary -Polyphonic singing is one of the most widespread forms of music-making. During a vocal performance, singers must constantly adjust their pitch to stay in tune with one another — a complex skill that often requires extensive practice and guidance from a conductor or experienced lead singer. Recent research suggests that a singer can improve their tuning during rehearsals when given visual feedback in real time [@BerglinPD22_VisualFeedback_JPM], e.g., through watching a visualization of the fundamental frequency (F0) trajectory which corresponds to the pitch progression of the singing voice. In the context of polyphonic singing, real-time analysis of all voices together is essential to assess the complex interactions and provide meaningful feedback. To this end, we developed `pytch`, an interactive Python tool with a graphical user interface (GUI) designed to record and analyze multiple voices in real time through multichannel processing. The tool displays vocal spectra and estimated F0-trajectories for all singers, as well as the harmonic intervals between them. Furthermore, the user can interactively tune visual and algorithmic parameters to adapt to different input devices, microphone signals, singing styles, and use cases. Written in Python, `pytch` utilizes the `libf0` library [@RosenzweigSM22_libf0_ISMIR-LBD] for F0-estimation and the `pyqtgraph` library [^1] to visualize the analysis results. +Polyphonic singing is one of the most widespread forms of music-making. 
During a performance, singers must constantly adjust their pitch to stay in tune with one another — a complex skill that requires extensive practice. Research has shown that tools offering visual feedback can assist singers in fine-tuning their intonation during a performance [@BerglinPD22_VisualFeedback_JPM]. Specifically, real-time visualizations of the fundamental frequency (F0), which represents the pitch of the singing voice, help singers in assessing their pitch relative to a fixed reference or other voices. +To support the monitoring of polyphonic singing performances, we developed `pytch`, an interactive Python tool with a graphical user interface (GUI) designed to record, process and visualize multiple voices in real time. The GUI displays vocal spectra and estimated F0-trajectories for all singers, as well as the harmonic intervals between them. Additionally, users can adjust visual and algorithmic parameters interactively to accommodate different input devices, microphone signals, singing styles and use cases. Written in Python, `pytch` utilizes the `libf0-realtime` library [@MeierSM25_RealTimeF0_ISMIR] for real-time F0-estimation and the `pyqtgraph` library [^1] to visualize the analysis results. [^1]: # Statement of Need -Various tools for singing analysis exist, ranging from open-source research platforms like Praat [@Boersma01_Praat_GI], Sonic Visualiser [@CannamLS10_SonicVisualizer_ICMC], and Tarsos [@SixCL13_Tarsos_JNMR], to commercial applications such as Singstar[^2] or Singing Carrots [^3]. However, these tools face several limitations when applied to polyphonic singing. Most notably, many tools can only process a single voice at a time, which is problematic for analyzing the interactions between voices in a group performance. Additionally, real-time feedback is often missing, preventing an effective feedback loop between singers and their tool. Furthermore, tools that rely on a score as a reference pose challenges for a cappella performances, where singers may drift together in pitch over time while maintaining relative harmony, or in orally-transmitted traditions that may lack a formal score altogether. To address these challenges, we developed `pytch`. Unlike existing tools, `pytch` enables the analysis of multiple voices simultaneously in real time without classifying singing as simply “correct” or “incorrect.” Instead, it serves as an objective, score-independent measurement tool for singers and conductors to assess and improve their collective tuning. Additionally, `pytch` offers a platform for developing and testing real-time audio processing algorithms, such as for F0-estimation. +Software for assessing a singing voice performance +Various tools for singing performance analysis exist, ranging from open-source research platforms like Praat [@Boersma01_Praat_GI], Sonic Visualiser [@CannamLS10_SonicVisualizer_ICMC], and Tarsos [@SixCL13_Tarsos_JNMR], to commercial applications such as Singstar[^2] or Singing Carrots [^3]. However, these tools face several limitations when applied to polyphonic singing. Most notably, many tools can only process a single voice at a time, which is problematic for analyzing the interactions between voices in a group performance. Additionally, real-time feedback is often missing, preventing an effective feedback loop between singers and their tool. 
Furthermore, tools that rely on a score as a reference pose challenges for a cappella performances, where singers may drift together in pitch over time while maintaining relative harmony, or in orally-transmitted traditions that may lack a formal score altogether [@ScherbaumMRM19_MultimediaRecordings_FMA]. To address these challenges, we developed `pytch`. Unlike existing tools, `pytch` enables monitoring multiple voices simultaneously in real time + + +without classifying singing as simply “correct” or “incorrect.” Instead, it serves as an objective, score-independent measurement tool for singers and conductors to assess and improve their collective tuning. Additionally, `pytch` offers a platform for developing and testing real-time audio processing algorithms, such as for F0-estimation. [^2]: [^3]: @@ -70,6 +75,6 @@ While there is no hard limit on the number of channels, we recommend to use up t # Acknowledgements **DFG? Uni Potsdam?** -We would like to thank all singers that helped developing this tool. The International Audio Laboratories Erlangen are a joint institution of the Friedrich-Alexander-Universität Erlangen-Nürnberg (FAU) and Fraunhofer Institute for Integrated Circuits IIS. +We would like to thank all the singers who contributed to testing `pytch` during its development. The International Audio Laboratories Erlangen are a joint institution of the Friedrich-Alexander-Universität Erlangen-Nürnberg (FAU) and Fraunhofer Institute for Integrated Circuits IIS. # References From 1a187a95ede1f39fe90947c6421c22a2bc434f1d Mon Sep 17 00:00:00 2001 From: sebastianrosenzweig Date: Thu, 20 Mar 2025 22:26:07 +0100 Subject: [PATCH 08/27] minor --- paper/paper.bib | 11 +++++++++++ paper/paper.md | 2 +- 2 files changed, 12 insertions(+), 1 deletion(-) diff --git a/paper/paper.bib b/paper/paper.bib index 66e2706..7ceb81b 100644 --- a/paper/paper.bib +++ b/paper/paper.bib @@ -20,6 +20,17 @@ @inproceedings{MeierSM25_RealTimeF0_ISMIR url-code = {} } + +@inproceedings{ScherbaumMRM19_MultimediaRecordings_FMA, + author = {Frank Scherbaum and Nana Mzhavanadze and Sebastian Rosenzweig and Meinard M{\"u}ller}, + title = {Multi-media recordings of traditional {G}eorgian vocal music for computational analysis}, + booktitle = {Proceedings of the International Workshop on Folk Music Analysis ({FMA})}, + address = {Birmingham, UK}, + year = {2019}, + pages = {1--6} +} + + @inproceedings{RosenzweigSM22_libf0_ISMIR-LBD, author = {Sebastian Rosenzweig and Simon Schw{\"a}r and Meinard M{\"u}ller}, title = {libf0: A Python Library for Fundamental Frequency Estimation}, diff --git a/paper/paper.md b/paper/paper.md index 2c6f7be..00428ae 100644 --- a/paper/paper.md +++ b/paper/paper.md @@ -51,7 +51,7 @@ To support the monitoring of polyphonic singing performances, we developed `pytc [^1]: # Statement of Need -Software for assessing a singing voice performance +There exist a wide range of software for assessing a singing voice performances. Most prominently, Karaoke applications like Let's Sing or Rock Bands, which compare the singing voice to a score representation of the singing part. Various tools for singing performance analysis exist, ranging from open-source research platforms like Praat [@Boersma01_Praat_GI], Sonic Visualiser [@CannamLS10_SonicVisualizer_ICMC], and Tarsos [@SixCL13_Tarsos_JNMR], to commercial applications such as Singstar[^2] or Singing Carrots [^3]. However, these tools face several limitations when applied to polyphonic singing. 
Most notably, many tools can only process a single voice at a time, which is problematic for analyzing the interactions between voices in a group performance. Additionally, real-time feedback is often missing, preventing an effective feedback loop between singers and their tool. Furthermore, tools that rely on a score as a reference pose challenges for a cappella performances, where singers may drift together in pitch over time while maintaining relative harmony, or in orally-transmitted traditions that may lack a formal score altogether [@ScherbaumMRM19_MultimediaRecordings_FMA]. To address these challenges, we developed `pytch`. Unlike existing tools, `pytch` enables monitoring multiple voices simultaneously in real time From dc762d3b4cfffc9c8b611059a6004a8f0d06fd46 Mon Sep 17 00:00:00 2001 From: Sebastian Rosenzweig Date: Fri, 21 Mar 2025 11:47:07 +0100 Subject: [PATCH 09/27] statement of need and audio processing --- paper/paper.md | 16 +++++++--------- 1 file changed, 7 insertions(+), 9 deletions(-) diff --git a/paper/paper.md b/paper/paper.md index 00428ae..57d1832 100644 --- a/paper/paper.md +++ b/paper/paper.md @@ -45,24 +45,22 @@ bibliography: paper.bib --- # Summary -Polyphonic singing is one of the most widespread forms of music-making. During a performance, singers must constantly adjust their pitch to stay in tune with one another — a complex skill that requires extensive practice. Research has shown that tools offering visual feedback can assist singers in fine-tuning their intonation during a performance [@BerglinPD22_VisualFeedback_JPM]. Specifically, real-time visualizations of the fundamental frequency (F0), which represents the pitch of the singing voice, help singers in assessing their pitch relative to a fixed reference or other voices. +Polyphonic singing is one of the most widespread forms of music-making. During a performance, singers must constantly adjust their pitch to stay in tune with one another — a complex skill that requires extensive practice. Research has shown that pitch monitoring tools can assist singers in fine-tuning their intonation during a performance [@BerglinPD22_VisualFeedback_JPM]. Specifically, real-time visualizations of the fundamental frequency (F0), which represents the pitch of the singing voice, help singers in assessing their pitch relative to a fixed reference or other voices. To support the monitoring of polyphonic singing performances, we developed `pytch`, an interactive Python tool with a graphical user interface (GUI) designed to record, process and visualize multiple voices in real time. The GUI displays vocal spectra and estimated F0-trajectories for all singers, as well as the harmonic intervals between them. Additionally, users can adjust visual and algorithmic parameters interactively to accommodate different input devices, microphone signals, singing styles and use cases. Written in Python, `pytch` utilizes the `libf0-realtime` library [@MeierSM25_RealTimeF0_ISMIR] for real-time F0-estimation and the `pyqtgraph` library [^1] to visualize the analysis results. [^1]: # Statement of Need -There exist a wide range of software for assessing a singing voice performances. Most prominently, Karaoke applications like Let's Sing or Rock Bands, which compare the singing voice to a score representation of the singing part. 
-Various tools for singing performance analysis exist, ranging from open-source research platforms like Praat [@Boersma01_Praat_GI], Sonic Visualiser [@CannamLS10_SonicVisualizer_ICMC], and Tarsos [@SixCL13_Tarsos_JNMR], to commercial applications such as Singstar[^2] or Singing Carrots [^3]. However, these tools face several limitations when applied to polyphonic singing. Most notably, many tools can only process a single voice at a time, which is problematic for analyzing the interactions between voices in a group performance. Additionally, real-time feedback is often missing, preventing an effective feedback loop between singers and their tool. Furthermore, tools that rely on a score as a reference pose challenges for a cappella performances, where singers may drift together in pitch over time while maintaining relative harmony, or in orally-transmitted traditions that may lack a formal score altogether [@ScherbaumMRM19_MultimediaRecordings_FMA]. To address these challenges, we developed `pytch`. Unlike existing tools, `pytch` enables monitoring multiple voices simultaneously in real time +Software that assesses the pitch of a singing voice in real time is mostly known from Karaoke applications, such as Let's Sing[^2] or Rock Band[^3], where the singer sings along a backing track and the pitch is compared to a score reference in order to analyse whether the sung notes are "correct" or "incorrect". However, these applications face several limitations when applied to polyphonic singing. Most notably, many Karaoke systems can only process one or two singing voices at a time, which is problematic for analyzing group performances. Additionally, software that rely on a score as a reference pose challenges for a cappella performances, where singers may drift together in pitch over time while maintaining relative harmony, or in orally-transmitted traditions that may lack a formal score altogether [@ScherbaumMRM19_MultimediaRecordings_FMA]. Finally, existing open-source research software for singing voice processing, like Praat [@Boersma01_Praat_GI], Sonic Visualiser [@CannamLS10_SonicVisualizer_ICMC], and Tarsos [@SixCL13_Tarsos_JNMR], lack real-time feedback, preventing an effective feedback loop between singers and their tool. +To address these challenges, we developed `pytch`. Unlike existing tools, `pytch` enables monitoring multiple voices simultaneously in real time. Rather than classifying singing as simply “correct” or “incorrect”, it serves as an objective monitoring tool for singers and conductors to assess, discuss, and improve their collective tuning. Additionally, `pytch` offers researchers in music information retrieval a platform for developing and testing real-time audio processing algorithms, such as for F0-estimation. +[^2]: +[^3]: -without classifying singing as simply “correct” or “incorrect.” Instead, it serves as an objective, score-independent measurement tool for singers and conductors to assess and improve their collective tuning. Additionally, `pytch` offers a platform for developing and testing real-time audio processing algorithms, such as for F0-estimation. +# Audio Processing -[^2]: -[^3]: +In the following, we describe the real-time audio processing pipeline of `pytch`. As shown in Figure X, it consists of two main processes, the recording and the analysis process. The recording process records multichannel audio waveforms from the soundcard or an external audio interface using the `sounddevice` library. 
The library is based on PortAudio and supports a wide range of operating systems, audio devices, and sampling rates. The recorded audio is received in chunks via a recording callback and fed into a ring buffer shared with the analysis process. When the buffer is sufficiently filled with audio chunks, the analysis process reads the recorded audio to compute several audio features. For each channel, it computes the audio level in dBFS, magnitude short-time fourier transform (also referred to as spectrogram), and an estimate of the F0 using the `libf0-realtime` library [@MeierSM25_RealTimeF0_ISMIR]. The library includes several real-time implementations of well-known F0-estimation algorithms, such as YIN[@CheveigneK02_YIN_JASA] and SWIPE. YIN is a time-domain algorithm that computes the F0 based on a tweaked auto-correlation function. SWIPE is a frequency-domain algorithm that estimates the F0 by matching different spectral representations of the audio with sawtooth-like kernels. The F0-estimates, which are natively computed in the unit Hz are converted to the unit cents using a user-specified reference frequency. Depending on the audio quality and singing voice, estimated F0s may exhibit sudden jumps or incontinuities that result in hard-to read trajectories. To this end, the processing includes to optional filtering operations, one for smoothing using a median filter and one to remove pitch slide artifacts using a gradient filter. As a last audio feature, the harmonic intervals between the F0-trajectories are computed. Every audio feature is stored separately in a dedicated ring buffer. Once the processing is completed, the audio pipeline informs the GUI via a flag that new data for visualization is available. -# Audio Processing -* -YIN [@CheveigneK02_YIN_JASA] # Graphical User Interface (GUI) ![`pytch` GUI.\label{fig:GUI}](../pictures/screenshot.png){ width=90% } From b60705a9717d95188fc1edd5062e3877eafba677 Mon Sep 17 00:00:00 2001 From: Sebastian Rosenzweig Date: Mon, 24 Mar 2025 08:53:35 +0100 Subject: [PATCH 10/27] GUI description --- paper/paper.md | 15 +++++++++++++-- 1 file changed, 13 insertions(+), 2 deletions(-) diff --git a/paper/paper.md b/paper/paper.md index 57d1832..32d7d30 100644 --- a/paper/paper.md +++ b/paper/paper.md @@ -54,6 +54,10 @@ To support the monitoring of polyphonic singing performances, we developed `pytc Software that assesses the pitch of a singing voice in real time is mostly known from Karaoke applications, such as Let's Sing[^2] or Rock Band[^3], where the singer sings along a backing track and the pitch is compared to a score reference in order to analyse whether the sung notes are "correct" or "incorrect". However, these applications face several limitations when applied to polyphonic singing. Most notably, many Karaoke systems can only process one or two singing voices at a time, which is problematic for analyzing group performances. Additionally, software that rely on a score as a reference pose challenges for a cappella performances, where singers may drift together in pitch over time while maintaining relative harmony, or in orally-transmitted traditions that may lack a formal score altogether [@ScherbaumMRM19_MultimediaRecordings_FMA]. Finally, existing open-source research software for singing voice processing, like Praat [@Boersma01_Praat_GI], Sonic Visualiser [@CannamLS10_SonicVisualizer_ICMC], and Tarsos [@SixCL13_Tarsos_JNMR], lack real-time feedback, preventing an effective feedback loop between singers and their tool. 
To address these challenges, we developed `pytch`. Unlike existing tools, `pytch` enables monitoring multiple voices simultaneously in real time. Rather than classifying singing as simply “correct” or “incorrect”, it serves as an objective monitoring tool for singers and conductors to assess, discuss, and improve their collective tuning. Additionally, `pytch` offers researchers in music information retrieval a platform for developing and testing real-time audio processing algorithms, such as for F0-estimation. +* More explicit use case rehearsal +* more explicit use case tech +* 2 target audiences/users + [^2]: [^3]: @@ -61,13 +65,20 @@ To address these challenges, we developed `pytch`. Unlike existing tools, `pytch In the following, we describe the real-time audio processing pipeline of `pytch`. As shown in Figure X, it consists of two main processes, the recording and the analysis process. The recording process records multichannel audio waveforms from the soundcard or an external audio interface using the `sounddevice` library. The library is based on PortAudio and supports a wide range of operating systems, audio devices, and sampling rates. The recorded audio is received in chunks via a recording callback and fed into a ring buffer shared with the analysis process. When the buffer is sufficiently filled with audio chunks, the analysis process reads the recorded audio to compute several audio features. For each channel, it computes the audio level in dBFS, magnitude short-time fourier transform (also referred to as spectrogram), and an estimate of the F0 using the `libf0-realtime` library [@MeierSM25_RealTimeF0_ISMIR]. The library includes several real-time implementations of well-known F0-estimation algorithms, such as YIN[@CheveigneK02_YIN_JASA] and SWIPE. YIN is a time-domain algorithm that computes the F0 based on a tweaked auto-correlation function. SWIPE is a frequency-domain algorithm that estimates the F0 by matching different spectral representations of the audio with sawtooth-like kernels. The F0-estimates, which are natively computed in the unit Hz are converted to the unit cents using a user-specified reference frequency. Depending on the audio quality and singing voice, estimated F0s may exhibit sudden jumps or incontinuities that result in hard-to read trajectories. To this end, the processing includes to optional filtering operations, one for smoothing using a median filter and one to remove pitch slide artifacts using a gradient filter. As a last audio feature, the harmonic intervals between the F0-trajectories are computed. Every audio feature is stored separately in a dedicated ring buffer. Once the processing is completed, the audio pipeline informs the GUI via a flag that new data for visualization is available. +* refer to basic literature in audio processing +* peter meier paper +* explain confidence here? explain artifacts? + # Graphical User Interface (GUI) + +In the following, we explain step-by-step the GUI of `pytch`. Right after the program start, a startup menu opens in which the user is asked to specify the soundcard, the input channels, the sampling rate, and the window size for processing. Furthermore, the user can choose to store the recorded audio and the F0-trajectories on disk. These initial settings are important tfor initialising the audio processing module and the main GUI which is loaded when the user clicks `ok`. 
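For illustration, the following minimal sketch shows how such a multichannel input stream could be opened with the `sounddevice` library and written into a ring buffer shared with the analysis process; the device listing call, the parameter values, and the buffer layout are illustrative assumptions and do not reflect the actual implementation in `audio.py`.

```python
# Illustrative sketch only -- not the actual pytch implementation.
# A sounddevice input stream delivers multichannel chunks to a callback,
# which writes them into a ring buffer read by the analysis process.
import numpy as np
import sounddevice as sd

SAMPLE_RATE = 44100   # assumed sampling rate selected in the startup menu
BLOCK_SIZE = 2048     # assumed window size (samples per chunk)
CHANNELS = 2          # assumed number of selected input channels
RING_LEN = 128        # ring buffer capacity in chunks

print(sd.query_devices())  # list available sound cards, as in the startup menu

ring_buffer = np.zeros((RING_LEN, BLOCK_SIZE, CHANNELS), dtype=np.float32)
write_count = 0

def recording_callback(indata, frames, time, status):
    """Store each incoming chunk of shape (frames, channels) in the ring buffer."""
    global write_count
    if status:
        print(status)
    ring_buffer[write_count % RING_LEN] = indata
    write_count += 1

with sd.InputStream(samplerate=SAMPLE_RATE, blocksize=BLOCK_SIZE,
                    channels=CHANNELS, callback=recording_callback):
    sd.sleep(5000)  # record for five seconds while the callback fills the buffer
```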
While there is no hard limit on the number of channels, we recommend using up to four input channels to ensure visibility of the charts and responsiveness of the GUI.

A screenshot of the main GUI is shown in \autoref{fig:GUI}.

![`pytch` GUI.\label{fig:GUI}](../pictures/screenshot.png){ width=90% }

The GUI is divided into three horizontal parts. The left side contains a menu that includes a start/stop button, and controls to adjust the GUI appearance and tune the algorithmic parameters. In the center of the `pytch` GUI, we find the so-called "channel views": visualizations in a dedicated color for each channel, consisting of a level meter for microphone gain control, a spectrum visualization with the current F0-value marked by a vertical line, and a spectrogram visualization. The menu on the left can be used to enable or disable views, change the magnitude scaling of the spectrum and the spectrogram, and change the visible frequency range. The channels are ordered from top to bottom in the order of their selection in the startup menu. Optionally, the bottom channel can show the product of all channels.

The right side of the `pytch` GUI contains the so-called "trajectory view", which, depending on the chosen tab, either visualizes the F0-trajectories of all voices ("pitches") or the harmonic intervals between the voices ("differential"). Using the controls in the left-side menu, the user can select the F0-estimation algorithm and adjust the visualization using three parameters. First, the user can increase the "confidence threshold" parameter to suppress potentially noisy F0-estimates. Second, the median smoothing parameter smooths the displayed F0-trajectories; the larger the filter size, the smoother the result. Third, the pitchslide tolerance defines From 7579f0e80784ecbcac32a9fa0a9114111b1adda5 Mon Sep 17 00:00:00 2001 From: Sebastian Rosenzweig Date: Tue, 1 Apr 2025 08:42:26 +0200 Subject: [PATCH 11/27] improved motivation and descriptions --- paper/paper.md | 20 ++++++++++---------- 1 file changed, 10 insertions(+), 10 deletions(-) diff --git a/paper/paper.md b/paper/paper.md index 32d7d30..e6e497f 100644 --- a/paper/paper.md +++ b/paper/paper.md @@ -51,23 +51,23 @@ To support the monitoring of polyphonic singing performances, we developed `pytc

[^1]: 

# Statement of Need
-Software that assesses the pitch of a singing voice in real time is mostly known from Karaoke applications, such as Let's Sing[^2] or Rock Band[^3], where the singer sings along a backing track and the pitch is compared to a score reference in order to analyse whether the sung notes are "correct" or "incorrect". However, these applications face several limitations when applied to polyphonic singing. Most notably, many Karaoke systems can only process one or two singing voices at a time, which is problematic for analyzing group performances. Additionally, software that rely on a score as a reference pose challenges for a cappella performances, where singers may drift together in pitch over time while maintaining relative harmony, or in orally-transmitted traditions that may lack a formal score altogether [@ScherbaumMRM19_MultimediaRecordings_FMA].
Finally, existing open-source research software for singing voice processing, like Praat [@Boersma01_Praat_GI], Sonic Visualiser [@CannamLS10_SonicVisualizer_ICMC], and Tarsos [@SixCL13_Tarsos_JNMR], lack real-time feedback, preventing an effective feedback loop between singers and their tool. -To address these challenges, we developed `pytch`. Unlike existing tools, `pytch` enables monitoring multiple voices simultaneously in real time. Rather than classifying singing as simply “correct” or “incorrect”, it serves as an objective monitoring tool for singers and conductors to assess, discuss, and improve their collective tuning. Additionally, `pytch` offers researchers in music information retrieval a platform for developing and testing real-time audio processing algorithms, such as for F0-estimation. +Software that assesses the pitch of a singing voice in real time is mostly known from Karaoke singing applications, such as Let's Sing[^2], Rock Band[^3], or Cantamus[^4], where the singer sings along a backing track and the pitch is compared to a score reference in order to analyse whether the sung notes are "correct" or "incorrect". However, these applications face several limitations when applied to polyphonic singing. Most notably, many Karaoke systems can only process one or two singing voices at a time, which is problematic for analyzing group performances. Additionally, software that rely on a score as a reference pose challenges for a cappella performances, where singers may drift together in pitch over time while maintaining relative harmony, or in orally-transmitted traditions that may lack a formal score altogether [@ScherbaumMRM19_MultimediaRecordings_FMA]. Finally, existing open-source research software for singing voice processing, like Praat [@Boersma01_Praat_GI], Sonic Visualiser [@CannamLS10_SonicVisualizer_ICMC], and Tarsos [@SixCL13_Tarsos_JNMR], lack real-time feedback, preventing an effective feedback loop between singers and their tool. -* More explicit use case rehearsal -* more explicit use case tech -* 2 target audiences/users +To address these challenges, we developed `pytch`. During rehearsals, singers and conductors can use our tool to analyze and improve several aspects of the performance. E.g., the vocal spectra can help singers to fine-tune the expression of formant frequencies. Also, melodic and harmonic problems can be identified with the help of the displayed F0-trajectories and the harmonic intervals. Unlike existing tools, `pytch` doesn't require score information and is thus suited for use with various singing genres. Apart from its use in practice, `pytch` can also serve as a platform for conducting music processing research on real-time algorithms. Real-time audio processing comes along with two main challenges. First, the audio is processed in short chunks, which provide limited audio context for the algorithms. Second, the audio needs to be processed fast enough to enable a smooth vsiualisation. Reserachers in this field can use `pytch` to develop, test, and compare real-time audio processing algorithms such as for F0-estimation or for signal enhancement. [^2]: [^3]: +[^4]: + +* peter meier paper? # Audio Processing -In the following, we describe the real-time audio processing pipeline of `pytch`. As shown in Figure X, it consists of two main processes, the recording and the analysis process. The recording process records multichannel audio waveforms from the soundcard or an external audio interface using the `sounddevice` library. 
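As a rough illustration of this recording stage, the following self-contained sketch opens a callback-based `sounddevice` input stream and writes each incoming chunk into a NumPy ring buffer. It is a single-process toy version with illustrative sizes, whereas `pytch` itself shares the buffer with a separate analysis process:

```python
import numpy as np
import sounddevice as sd

SR = 44100            # sampling rate in Hz
CHANNELS = 2          # number of singer microphones in this toy setup
BUF_LEN = SR * 10     # ring buffer keeps the last 10 seconds per channel

ring = np.zeros((BUF_LEN, CHANNELS), dtype=np.float32)
write_pos = 0         # next write index into the ring buffer

def callback(indata, frames, time, status):
    """Recording callback: copy each incoming audio chunk into the ring buffer."""
    global write_pos
    if status:
        print(status)
    idx = (write_pos + np.arange(frames)) % BUF_LEN
    ring[idx] = indata                      # indata has shape (frames, CHANNELS)
    write_pos = (write_pos + frames) % BUF_LEN

# Open a multichannel input stream; chunks arrive via the callback while the
# analysis code (not shown here) reads the most recent samples from `ring`.
with sd.InputStream(samplerate=SR, channels=CHANNELS, callback=callback):
    sd.sleep(2000)                          # record for two seconds
```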
The library is based on PortAudio and supports a wide range of operating systems, audio devices, and sampling rates. The recorded audio is received in chunks via a recording callback and fed into a ring buffer shared with the analysis process. When the buffer is sufficiently filled with audio chunks, the analysis process reads the recorded audio to compute several audio features. For each channel, it computes the audio level in dBFS, magnitude short-time fourier transform (also referred to as spectrogram), and an estimate of the F0 using the `libf0-realtime` library [@MeierSM25_RealTimeF0_ISMIR]. The library includes several real-time implementations of well-known F0-estimation algorithms, such as YIN[@CheveigneK02_YIN_JASA] and SWIPE. YIN is a time-domain algorithm that computes the F0 based on a tweaked auto-correlation function. SWIPE is a frequency-domain algorithm that estimates the F0 by matching different spectral representations of the audio with sawtooth-like kernels. The F0-estimates, which are natively computed in the unit Hz are converted to the unit cents using a user-specified reference frequency. Depending on the audio quality and singing voice, estimated F0s may exhibit sudden jumps or incontinuities that result in hard-to read trajectories. To this end, the processing includes to optional filtering operations, one for smoothing using a median filter and one to remove pitch slide artifacts using a gradient filter. As a last audio feature, the harmonic intervals between the F0-trajectories are computed. Every audio feature is stored separately in a dedicated ring buffer. Once the processing is completed, the audio pipeline informs the GUI via a flag that new data for visualization is available. +The real-time audio processing pipeline implemented in the file `audio.py` is the heart of `pytch`. As shown in Figure X, the audio processing consists of two main processes, the recording and the analysis process. The recording process records multichannel audio waveforms from the soundcard or an external audio interface using the `sounddevice` library. The library is based on PortAudio and supports a wide range of operating systems, audio devices, and sampling rates. The recorded audio is received in chunks via a recording callback and fed into a ring buffer shared with the analysis process. When the buffer is sufficiently filled with audio chunks, the analysis process reads the recorded audio to compute several audio features. + +For each channel, the analysis process computes the audio level in dBFS, magnitude short-time fourier transform (also referred to as spectrogram, see FMP for more information on basic audio processing algorithms), and an estimate of the F0 along with a confidence value using the `libf0-realtime` library [@MeierSM25_RealTimeF0_ISMIR]. The library includes several real-time implementations of well-known F0-estimation algorithms, such as YIN[@CheveigneK02_YIN_JASA] and SWIPE. YIN is a time-domain algorithm that computes the F0 based on a tweaked auto-correlation function. SWIPE is a frequency-domain algorithm that estimates the F0 by matching different spectral representations of the audio with sawtooth-like kernels. The F0-estimates, which are natively computed in the unit Hz are converted to the unit cents using a user-specified reference frequency. Depending on the audio quality and singing voice, estimated F0s may exhibit several artifacts such as incontinuities or slides which can make the trajectories hard to interpret. 
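The conversion from Hz to cents follows the standard logarithmic formula. A minimal NumPy sketch, in which the reference frequency of 440 Hz is only an illustrative default, could look as follows:

```python
import numpy as np

def hz_to_cents(f0_hz, reference_hz=440.0):
    """Convert F0 values from Hz to cents relative to a reference frequency.

    Unvoiced frames (F0 == 0) are mapped to NaN so they do not produce
    spurious jumps in the plotted trajectory.
    """
    f0_hz = np.asarray(f0_hz, dtype=float)
    cents = np.full_like(f0_hz, np.nan)
    voiced = f0_hz > 0
    cents[voiced] = 1200.0 * np.log2(f0_hz[voiced] / reference_hz)
    return cents

print(hz_to_cents([220.0, 440.0, 0.0, 466.16]))  # approx. [-1200, 0, nan, 100]
```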
To address such artifacts, `pytch` offers optional cleaning operations, e.g. a threshold to remove estimates with low confidence, a median filter to smooth the trajectories, and a gradient filter to remove trajectory slides. As a last audio feature, the harmonic intervals between the F0-trajectories are computed. Every audio feature is stored separately in a dedicated ring buffer. Once the processing is completed, the audio pipeline informs the GUI via a flag that new data for visualization is available. * refer to basic literature in audio processing # Graphical User Interface (GUI) @@ -78,9 +78,9 @@ In the following, we explain step-by-step the GUI of `pytch`. Right after the pr The GUI is divided in three horizontal parts. The left side contains a menu that includes a start/stop button, and controls to adjust the GUI appearance and tune the algorithmic parameters. In the center of the `pytch` GUI, we find the so called "channel views" - visualisations in a dedicated color for each channel consisting of a level meter for microphone gain control, a spectrum visualisation with the current F0-value marked with a vertical line, and a spectrogram visualisation. The menu on the left can be used to enable or disable views, change the magnitude scaling of the spectrum and the spectrogram, and change the visible frequency range. The channels are ordered from top to bottom in order of selection in the startup menu. Optionally, the bottom channel can show a product of all channels. -The right side of the `pytch` GUI contains the so called "trajectory view" which, depending on the chosen tab, either visualizes the F0-trajectories of all voices ("pitches"), or the harmonic intervals between the voices ("differential"). Using the controls in the left-side menu, the user can select the F0-estimation algorithm and adjust the visualisation using three parameters. First, the user can increase "confidence threshold" parameter to reduce potential noisy F0-estimates. Second, the median smoothing parameter allows to smoothen the displayed F0-trajectories - the larger the filter size the smoother. Thirs, the pitchslide tolerance defines - +The right side of the `pytch` GUI contains the so called "trajectory view" which, depending on the chosen tab, either visualizes the F0-trajectories of all voices ("pitches"), or the harmonic intervals between the voices ("differential"). Using the controls in the left-side menu, the user can select the F0-estimation algorithm and improve the real-time visualization by adjusting the confidence threshold, the median filter length for smoothing and the tolerance of the gradient filter. The F0- and interval-trajectories can be visualized with respect to different fixed or time-varying reference frequencies. In particular, the user can choose a fixed reference frequency (as specified in the left menu) or variable reference frequencies stemming from a specified channel or simply the lowest/highest voice. Furthermore, the user can set the axis limits of the right-side tabs. +* cents explanation # Acknowledgements **DFG?
Uni Potsdam?** From 9fb86a8ea7bcc8fffb6e26c6510a465d7d966b0c Mon Sep 17 00:00:00 2001 From: sebastianrosenzweig Date: Fri, 11 Apr 2025 11:08:10 +0200 Subject: [PATCH 12/27] improvements --- paper/paper.bib | 109 +++++++++++++++++++++++++++++++++--------------- paper/paper.md | 32 +++++++------- 2 files changed, 89 insertions(+), 52 deletions(-) diff --git a/paper/paper.bib b/paper/paper.bib index 7ceb81b..2e72338 100644 --- a/paper/paper.bib +++ b/paper/paper.bib @@ -1,3 +1,25 @@ +@inproceedings{MeierSM25_RealTimeF0_ISMIR, +author = {Peter Meier and Sebastian Strahl and Simon Schw{\"a}r and Meinard M{\"u}ller}, +title = {libf0-realtime: TODO}, +booktitle = {Submitted to the International Society for Music Information Retrieval Conference ({ISMIR})}, +address = {}, +year = {2025}, +url-pdf = {}, +url-code = {} +} + +@article{MeierCM24_RealTimePLP_TISMIR, +author = {Peter Meier and Ching-Yu Chiu and Meinard M{\"u}ller}, +title = {{A} Real-Time Beat Tracking System with Zero Latency and Enhanced Controllability}, +journal = {Transactions of the International Society for Music Information Retrieval ({TISMIR})}, +year = {2024}, +volume = {7}, +number = {1}, +pages = {213--227}, +doi = {10.5334/tismir.189}, +url-demo = {https://audiolabs-erlangen.de/resources/MIR/2024-TISMIR-RealTimePLP} +} + @article{BerglinPD22_VisualFeedback_JPM, author = {Jacob Berglin and Peter Q Pfordresher and Steven Demorest}, title = {The effect of visual and auditory feedback on adult poor-pitch remediation}, @@ -10,16 +32,28 @@ @article{BerglinPD22_VisualFeedback_JPM URL = {https://doi.org/10.1177/03057356211026730}, } -@inproceedings{MeierSM25_RealTimeF0_ISMIR, -author = {Peter Meier and Sebastian Strahl and Simon Schw{\"a}r and Meinard M{\"u}ller}, -title = {libf0-realtime: TODO}, -booktitle = {Submitted to the International Society for Music Information Retrieval Conference ({ISMIR})}, -address = {}, -year = {2025}, -url-pdf = {}, -url-code = {} +@inproceedings{RosenzweigSM22_libf0_ISMIR-LBD, +author = {Sebastian Rosenzweig and Simon Schw{\"a}r and Meinard M{\"u}ller}, +title = {libf0: A Python Library for Fundamental Frequency Estimation}, +booktitle = {Late Breaking Demos of the International Society for Music Information Retrieval Conference ({ISMIR})}, +address = {Bengaluru, India}, +year = {2022}, +url-pdf = {https://archives.ismir.net/ismir2022/latebreaking/000003.pdf}, +url-code = {https://github.com/groupmm/libf0/} } +@book{Mueller21_FMP_SPRINGER, + author = {Meinard M\"{u}ller}, + title = {Fundamentals of Music Processing -- Using Python and Jupyter Notebooks}, + type = {Monograph}, + year = {2021}, + isbn = {978-3-030-69807-2}, + publisher = {Springer Verlag}, + edition = {2nd}, + pages = {1--495}, + doi = {10.1007/978-3-030-69808-9}, + url-details = {http://www.music-processing.de} +} @inproceedings{ScherbaumMRM19_MultimediaRecordings_FMA, author = {Frank Scherbaum and Nana Mzhavanadze and Sebastian Rosenzweig and Meinard M{\"u}ller}, @@ -30,34 +64,23 @@ @inproceedings{ScherbaumMRM19_MultimediaRecordings_FMA pages = {1--6} } - -@inproceedings{RosenzweigSM22_libf0_ISMIR-LBD, -author = {Sebastian Rosenzweig and Simon Schw{\"a}r and Meinard M{\"u}ller}, -title = {libf0: A Python Library for Fundamental Frequency Estimation}, -booktitle = {Late Breaking Demos of the International Society for Music Information Retrieval Conference ({ISMIR})}, -address = {Bengaluru, India}, -year = {2022}, -url-pdf = {https://archives.ismir.net/ismir2022/latebreaking/000003.pdf}, -url-code = {https://github.com/groupmm/libf0/} 
-} - -@article{Boersma01_Praat_GI, - author = {Paul Boersma}, - journal = {Glot International}, - number = {9/10}, - pages = {341--345}, - title = {{Praat}, a system for doing phonetics by computer}, - volume = {5}, - year = {2001} +@inproceedings{RosenzweigSM19_StableF0_ISMIR, + author = {Sebastian Rosenzweig and Frank Scherbaum and Meinard M{\"u}ller}, + title = {Detecting Stable Regions in Frequency Trajectories for Tonal Analysis of Traditional {G}eorgian Vocal Music}, + booktitle = {Proceedings of the International Society for Music Information Retrieval Conference ({ISMIR})}, + pages = {352--359}, + address = {Delft, The Netherlands}, + year = {2019}, + doi = {10.5281/zenodo.3527816} } -@inproceedings{CannamLS10_SonicVisualizer_ICMC, - author = {Chris Cannam and Christian Landone and Mark B. Sandler}, - title = {Sonic {V}isualiser: An Open Source Application for Viewing, Analysing, and Annotating Music Audio Files}, - booktitle = {Proceedings of the International Conference on Multimedia}, - address = {Florence, Italy}, - pages = {1467--1468}, - year = {2010}, +@article{Scherbaum16_LarynxMicrophones_IWFMA, + author = {Frank Scherbaum}, + title = {On the Benefit of Larynx-Microphone Field Recordings for the Documentation and Analysis of Polyphonic Vocal Music}, + journal = {Proceedings of the International Workshop Folk Music Analysis}, + pages = {80--87}, + address = {Dublin,Ireland}, + year = {2016} } @article{SixCL13_Tarsos_JNMR, @@ -71,6 +94,24 @@ @article{SixCL13_Tarsos_JNMR doi = {10.1080/09298215.2013.797999} } +@inproceedings{CannamLS10_SonicVisualizer_ICMC, + author = {Chris Cannam and Christian Landone and Mark B. Sandler}, + title = {Sonic {V}isualiser: An Open Source Application for Viewing, Analysing, and Annotating Music Audio Files}, + booktitle = {Proceedings of the International Conference on Multimedia}, + address = {Florence, Italy}, + pages = {1467--1468}, + year = {2010}, +} + +@article{Boersma01_Praat_GI, + author = {Paul Boersma}, + journal = {Glot International}, + number = {9/10}, + pages = {341--345}, + title = {{Praat}, a system for doing phonetics by computer}, + volume = {5}, + year = {2001} +} diff --git a/paper/paper.md b/paper/paper.md index e6e497f..b2d26e5 100644 --- a/paper/paper.md +++ b/paper/paper.md @@ -40,47 +40,43 @@ affiliations: index: 4 - name: Tantive GmbH, Nürnberg, Germany index: 5 -date: 7 March 2025 +date: 11 April 2025 bibliography: paper.bib --- # Summary -Polyphonic singing is one of the most widespread forms of music-making. During a performance, singers must constantly adjust their pitch to stay in tune with one another — a complex skill that requires extensive practice. Research has shown that pitch monitoring tools can assist singers in fine-tuning their intonation during a performance [@BerglinPD22_VisualFeedback_JPM]. Specifically, real-time visualizations of the fundamental frequency (F0), which represents the pitch of the singing voice, help singers in assessing their pitch relative to a fixed reference or other voices. -To support the monitoring of polyphonic singing performances, we developed `pytch`, an interactive Python tool with a graphical user interface (GUI) designed to record, process and visualize multiple voices in real time. The GUI displays vocal spectra and estimated F0-trajectories for all singers, as well as the harmonic intervals between them. 
Additionally, users can adjust visual and algorithmic parameters interactively to accommodate different input devices, microphone signals, singing styles and use cases. Written in Python, `pytch` utilizes the `libf0-realtime` library [@MeierSM25_RealTimeF0_ISMIR] for real-time F0-estimation and the `pyqtgraph` library [^1] to visualize the analysis results. +Polyphonic singing is one of the most widespread forms of music-making. During a performance, singers must constantly adjust their pitch to stay in tune with one another — a complex skill that requires extensive practice. Research has shown that pitch monitoring tools can assist singers in fine-tuning their intonation during a performance [@BerglinPD22_VisualFeedback_JPM]. Specifically, real-time visualizations of the fundamental frequency (F0), which represents the pitch of the singing voice, help singers assess their pitch relative to a fixed reference or other voices. +To support the monitoring of polyphonic singing performances, we developed `pytch`, an interactive Python tool with a graphical user interface (GUI) designed to record, process, and visualize multiple voices in real time. The GUI displays vocal spectra and estimated F0 trajectories for all singers, as well as the harmonic intervals between them. Additionally, users can adjust visual and algorithmic parameters interactively to accommodate different input devices, microphone signals, singing styles and use cases. Written in Python, `pytch` utilizes the `libf0-realtime` library [@MeierSM25_RealTimeF0_ISMIR] for real-time F0 estimation and `pyqtgraph`[^1] for efficient visualizations of the analysis results. [^1]: # Statement of Need -Software that assesses the pitch of a singing voice in real time is mostly known from Karaoke singing applications, such as Let's Sing[^2], Rock Band[^3], or Cantamus[^4], where the singer sings along a backing track and the pitch is compared to a score reference in order to analyse whether the sung notes are "correct" or "incorrect". However, these applications face several limitations when applied to polyphonic singing. Most notably, many Karaoke systems can only process one or two singing voices at a time, which is problematic for analyzing group performances. Additionally, software that rely on a score as a reference pose challenges for a cappella performances, where singers may drift together in pitch over time while maintaining relative harmony, or in orally-transmitted traditions that may lack a formal score altogether [@ScherbaumMRM19_MultimediaRecordings_FMA]. Finally, existing open-source research software for singing voice processing, like Praat [@Boersma01_Praat_GI], Sonic Visualiser [@CannamLS10_SonicVisualizer_ICMC], and Tarsos [@SixCL13_Tarsos_JNMR], lack real-time feedback, preventing an effective feedback loop between singers and their tool. +Software that assesses the pitch of a singing voice in real time is mostly known from Karaoke singing applications, such as Let's Sing[^2], Rock Band[^3], or Cantamus[^4]. These tools typically compare the singer’s pitch to a score reference to judge whether notes are ‘correct’ or ‘incorrect’. However, such applications face several limitations when applied to polyphonic or group singing contexts. Most notably, many Karaoke systems can only process one or two singing voices at a time, which is problematic for monitoring group performances. 
Additionally, software that relies on a score as a reference poses challenges for a cappella performances, where singers may drift together in pitch over time while maintaining relative harmony, or in orally-transmitted traditions that may lack a formal score altogether [@ScherbaumMRM19_MultimediaRecordings_FMA]. Finally, existing open-source research software for singing voice processing, like Praat [@Boersma01_Praat_GI], Sonic Visualiser [@CannamLS10_SonicVisualizer_ICMC], and Tarsos [@SixCL13_Tarsos_JNMR], lack real-time feedback, preventing an effective feedback loop between singers and their tool. -To address these challenges, we developed `pytch`. During rehearsals, singers and conductors can use our tool to analyze and improve several aspects of the performance. E.g., the vocal spectra can help singers to fine-tune the expression of formant frequencies. Also, melodic and harmonic problems can be identified with the help of the displayed F0-trajectories and the harmonic intervals. Unlike existing tools, `pytch` doesn't require score information and is thus suited for use with various singing genres. Apart from its use in practice, `pytch` can also serve as a platform for conducting music processing research on real-time algorithms. Real-time audio processing comes along with two main challenges. First, the audio is processed in short chunks, which provide limited audio context for the algorithms. Second, the audio needs to be processed fast enough to enable a smooth vsiualisation. Reserachers in this field can use `pytch` to develop, test, and compare real-time audio processing algorithms such as for F0-estimation or for signal enhancement. +To address these challenges, we developed `pytch`, a tool that enables singers and conductors to analyze and improve various aspects of a performance during rehearsals. For example, the vocal spectra can help singers fine-tune the expression of formant frequencies, while melodic and harmonic issues become visible through F0 trajectories and harmonic intervals. Unlike many existing tools, `pytch` does not require a musical score, making it well-suited for a wide variety of singing traditions, including a cappella and orally transmitted genres. + +In addition to its practical applications, `pytch` also provides a flexible platform for music information retrieval (MIR) research on real-time audio processing. Working with real-time data introduces challenges such as a limited audio context for analysis and strict timing constraints to ensure low-latency processing. Researchers can use `pytch` to develop, test, and compare algorithms for tasks like F0 estimation and signal enhancement [@MeierCM24_RealTimePLP_TISMIR]. [^2]: [^3]: -[^4]: - -* peter meier paper? +[^4]: # Audio Processing -The real-time audio processing pipeline implemented in the file `audio.py` is the heart of `pytch`. As shown in Figure X, the audio processing consists of two main processes, the recording and the analysis process. The recording process records multichannel audio waveforms from the soundcard or an external audio interface using the `sounddevice` library. The library is based on PortAudio and supports a wide range of operating systems, audio devices, and sampling rates. The recorded audio is received in chunks via a recording callback and fed into a ring buffer shared with the analysis process. When the buffer is sufficiently filled with audio chunks, the analysis process reads the recorded audio to compute several audio features. 
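To give a rough idea of these per-channel features, the following sketch computes a level in dBFS, a magnitude spectrum, and a crude autocorrelation-based F0 estimate for a single chunk. It is a strong simplification for illustration only and not the `libf0-realtime` code that `pytch` actually uses:

```python
import numpy as np

def analyze_chunk(chunk, sr):
    """Toy per-channel analysis of one mono audio chunk.

    Returns the level in dBFS, the magnitude spectrum, and a crude F0
    estimate taken from the strongest autocorrelation peak (a strong
    simplification of what a time-domain method like YIN does).
    """
    # Level in dBFS: RMS relative to full scale (|x| <= 1.0).
    rms = np.sqrt(np.mean(chunk ** 2))
    level_dbfs = 20.0 * np.log10(rms + 1e-12)

    # Magnitude spectrum of the windowed chunk (one STFT column).
    spectrum = np.abs(np.fft.rfft(chunk * np.hanning(len(chunk))))

    # Autocorrelation-based F0: pick the lag with the strongest correlation
    # inside a plausible singing range (about 60-1000 Hz).
    ac = np.correlate(chunk, chunk, mode="full")[len(chunk) - 1:]
    lo, hi = int(sr / 1000), int(sr / 60)
    lag = lo + np.argmax(ac[lo:hi])
    f0_hz = sr / lag
    return level_dbfs, spectrum, f0_hz

sr = 44100
t = np.arange(2048) / sr
level, spec, f0 = analyze_chunk(0.5 * np.sin(2 * np.pi * 220 * t), sr)
print(round(level, 1), round(f0, 1))  # level near -9 dBFS, F0 close to 220 Hz
```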
+The real-time audio processing pipeline implemented in the file `audio.py` is the heart of `pytch` and consists of two main stages: recording and analysis. The recording stage records multichannel audio waveforms from the soundcard or an external audio interface using the `sounddevice` library. The library is based on PortAudio and supports a wide range of operating systems, audio devices, and sampling rates. The recorded audio is received in chunks via a recording callback and fed into a ring buffer shared with the analysis process. When the buffer is sufficiently filled with audio chunks, the analysis process reads the recorded audio to compute several audio features. -For each channel, the analysis process computes the audio level in dBFS, magnitude short-time fourier transform (also referred to as spectrogram, see FMP for more information on basic audio processing algorithms), and an estimate of the F0 along with a confidence value using the `libf0-realtime` library [@MeierSM25_RealTimeF0_ISMIR]. The library includes several real-time implementations of well-known F0-estimation algorithms, such as YIN[@CheveigneK02_YIN_JASA] and SWIPE. YIN is a time-domain algorithm that computes the F0 based on a tweaked auto-correlation function. SWIPE is a frequency-domain algorithm that estimates the F0 by matching different spectral representations of the audio with sawtooth-like kernels. The F0-estimates, which are natively computed in the unit Hz are converted to the unit cents using a user-specified reference frequency. Depending on the audio quality and singing voice, estimated F0s may exhibit several artifacts such as incontinuities or slides which can make the trajectories hard to interpret. To this end, `pytch` offers optional cleaning operations, e.g. a threshold to remove estimated with low confidence, a median filter to smooth the trajectories and a gradient filter to remove trajectory slides. As a last audio feature, the harmonic intervals between the F0-trajectories are computed. Every audio feature is stored separately in a dedicated ring buffer. Once the processing is completed, the audio pipeline informs the GUI via a flag that new data for visualization is available. - -* refer to basic literature in audio processing +For each channel, the analysis stage computes the audio level in dBFS, a time-frequency representation of the audio signal via the Short-Time Fourier Transform (see [@Mueller21_FMP_SPRINGER] for fundamentals of music processing), and an estimate of the F0 along with a confidence value using the `libf0-realtime` library [@MeierSM25_RealTimeF0_ISMIR]. The library includes several real-time implementations of well-known F0 estimation algorithms, such as YIN [@CheveigneK02_YIN_JASA] and SWIPE. YIN is a time-domain algorithm that computes the F0 based on a tweaked auto-correlation function. It is computationally efficient and well-suited for low-latency applications, but it tends to suffer from estimation errors, particularly confusions with higher harmonics such as the octave, for noisy signals. In contrast, SWIPE is a frequency-domain algorithm that estimates the F0 by matching different spectral representations of the audio with sawtooth-like kernels. While more computationally demanding, SWIPE typically yields more reliable estimates, in particular for vocal input signals. `pytch` allows users to choose between these algorithms depending on their specific needs and system capabilities. 
The obtained F0 estimates, which are natively computed in the unit Hz are converted to the unit cents using a user-specified reference frequency. Depending on the audio quality and vocal characteristics, F0 estimates may exhibit artifacts such as discontinuities or pitch slides, which can make the resulting trajectories difficult to interpret [@RosenzweigSM19_StableF0_ISMIR]. Previous research has shown that using throat microphones can improve the isolation of individual voices in group singing contexts, resulting in cleaner signals and more accurate F0 estimates [@Scherbaum16_LarynxMicrophones_IWFMA]. To further enhance interpretability, `pytch` includes several optional post-processing steps: a confidence threshold to discard estimates with low confidence score, a median filter to smooth the trajectories, and a gradient filter to suppress abrupt pitch slides. As a final step in the audio analysis, the harmonic intervals between the F0 trajectories are computed. Every audio feature is stored separately in a dedicated ring buffer. After processing, the pipeline sets a flag that notifies the GUI that new data is ready for visualization. # Graphical User Interface (GUI) -In the following, we explain step-by-step the GUI of `pytch`. Right after the program start, a startup menu opens in which the user is asked to specify the soundcard, the input channels, the sampling rate, and the window size for processing. Furthermore, the user can choose to store the recorded audio and the F0-trajectories on disk. These initial settings are important tfor initialising the audio processing module and the main GUI which is loaded when the user clicks `ok`. While there is no hard limit on the number of channels, we recommend to use up to four input channels to ensure visibility of the charts and responsiveness of the GUI. A screenshot of the main GUI is shown in Figure \autoref{fig:GUI}. - -![`pytch` GUI.\label{fig:GUI}](../pictures/screenshot.png){ width=90% } +In this section, we provide a step-by-step explanation of the `pytch` GUI implemented in the file `gui.py`. Right after the program start, a startup menu opens in which the user is asked to specify the soundcard, input channels, sampling rate, and window size for processing. Furthermore, the user can choose to store the recorded audio and the F0 trajectories on disk. These configuration choices are required to initialize the audio processing module and the main GUI which is loaded when the user clicks "ok". While there is no hard limit on the number of channels, we recommend to use up to four input channels to ensure visibility of the charts and responsiveness of the GUI. A screenshot of the main GUI is shown in Figure \autoref{fig:GUI}. -The GUI is divided in three horizontal parts. The left side contains a menu that includes a start/stop button, and controls to adjust the GUI appearance and tune the algorithmic parameters. In the center of the `pytch` GUI, we find the so called "channel views" - visualisations in a dedicated color for each channel consisting of a level meter for microphone gain control, a spectrum visualisation with the current F0-value marked with a vertical line, and a spectrogram visualisation. The menu on the left can be used to enable or disable views, change the magnitude scaling of the spectrum and the spectrogram, and change the visible frequency range. The channels are ordered from top to bottom in order of selection in the startup menu. Optionally, the bottom channel can show a product of all channels. 
+![`pytch` GUI monitoring three singing voices.\label{fig:GUI}](../pictures/screenshot.png){ width=90% } -The right side of the `pytch` GUI contains the so called "trajectory view" which, depending on the chosen tab, either visualizes the F0-trajectories of all voices ("pitches"), or the harmonic intervals between the voices ("differential"). Using the controls in the left-side menu, the user can select the F0-estimation algorithm and improve the real-time visualization by adjusting the confidence threshold, the median filter length for smoothing and the tolerance of the gradient filter. The F0- and interval-trajectories can be visualized with respect to different fixed or time-varying reference frequencies. In particular, the user can choose a fixed reference frequency (as specified in the left menu) or variable reference frequencies stemming from a specified channel or simply the lowest/highest voice. Furthermore, the user can set the axis limits of the the right-side tabs. +The main GUI is organized into three horizontal sections. On the left, a control panel provides a start/stop button and allows users to adjust both the visual layout and algorithmic parameters. The central section displays "channel views"--one for each input channel--color-coded for clarity. Each view includes a microphone level meter, a real-time spectrum display with a vertical line marking the current F0 estimate, and a scrolling spectrogram with a 5 second time context. Channels are listed from top to bottom in the order they were selected during setup. Optionally, the bottommost view can display a product signal from all channels. -* cents explanation +The right section, referred to as the "trajectory view," provides time-based visualizations of either the F0 trajectories ("pitches" tab) or the harmonic intervals between voices ("differential" tab) with a 10 second time context. Using the controls in the left-side menu, the user can select the F0 estimation algorithm and improve the real-time visualization by adjusting the confidence threshold, the median filter length for smoothing and the tolerance of the gradient filter. F0 and interval trajectories can be displayed with respect to a fixed reference frequency or a dynamic one derived from a selected channel, the lowest, or highest detected voice. Axis limits for this section can also be manually set. # Acknowledgements **DFG? Uni Potsdam?** From 55869806f5f57bf4af4fba4c8d2c9562bd2a8103 Mon Sep 17 00:00:00 2001 From: sebastianrosenzweig Date: Fri, 23 May 2025 16:49:36 +0200 Subject: [PATCH 13/27] feedback --- paper/paper.bib | 24 ++++++++++++++++++++++++ paper/paper.md | 50 ++++++++++++++++++++++++------------------------- 2 files changed, 48 insertions(+), 26 deletions(-) diff --git a/paper/paper.bib b/paper/paper.bib index 2e72338..6b6e924 100644 --- a/paper/paper.bib +++ b/paper/paper.bib @@ -74,6 +74,14 @@ @inproceedings{RosenzweigSM19_StableF0_ISMIR doi = {10.5281/zenodo.3527816} } +@inproceedings{KriegerowskiS_Pytch_2017, + author = {Marius Kriegerowski and Frank Scherbaum}, + title = {Pytch - simultane mehrkanalige Audioanalyse von Gesangstimmen}, + booktitle = {Late-breaking Demos of the Workshop: Musik trifft Informatik at 47. 
Jahrestagung der Gesellschaft für Informatik}, + year = {2017}, + address = {Chemnitz, Germany}, +} + @article{Scherbaum16_LarynxMicrophones_IWFMA, author = {Frank Scherbaum}, title = {On the Benefit of Larynx-Microphone Field Recordings for the Documentation and Analysis of Polyphonic Vocal Music}, @@ -250,3 +258,19 @@ @book{Mueller21_FMP_SPRINGER doi = {10.1007/978-3-030-69808-9}, url-details = {http://www.music-processing.de} } + +@article{Scherbaum16_LarynxMicrophones_IWFMA, + author = {Frank Scherbaum}, + title = {On the Benefit of Larynx-Microphone Field Recordings for the Documentation and Analysis of Polyphonic Vocal Music}, + journal = {Proceedings of the International Workshop Folk Music Analysis}, + pages = {80--87}, + address = {Dublin,Ireland}, + year = {2016} +} + +@book{HagermanS80_Barbershop_CITESEER, + title = {Fundamental frequency adjustment in barbershop singing}, + author = {B Hagerman and Johan Sundberg}, + year = {1980}, + publisher = {Citeseer} +} diff --git a/paper/paper.md b/paper/paper.md index b2d26e5..6f240a6 100644 --- a/paper/paper.md +++ b/paper/paper.md @@ -1,5 +1,5 @@ --- -title: '`pytch`: A Real-Time Monitoring Tool For Polyphonic Singing Performances' +title: '`pytch` v2: A Real-Time Monitoring Tool For Polyphonic Singing Performances' tags: - Python - Audio Processing @@ -10,18 +10,10 @@ authors: - name: Sebastian Rosenzweig orcid: 0000-0003-4964-9217 corresponding: true - affiliation: "1,2" + affiliation: 1 - name: Marius Kriegerowski orcid: corresponding: false - affiliation: 4 - - name: Lukas Dietz - orcid: - corresponding: false - affiliation: 2 - - name: Meinard Müller - orcid: 0000-0001-6062-7524 - corresponding: false affiliation: 2 - name: Frank Scherbaum orcid: 0000-0002-5050-7331 @@ -32,44 +24,51 @@ authors: affiliations: - name: Audoo Ltd., London, United Kingdom index: 1 - - name: International Audio Laboratories Erlangen, Erlangen, Germany + - name: Independent Researcher, Berlin, Germany index: 2 - name: University of Potsdam, Potsdam, Germany index: 3 - - name: Independent Researcher, Berlin, Germany - index: 4 - - name: Tantive GmbH, Nürnberg, Germany - index: 5 -date: 11 April 2025 +date: 23 May 2025 bibliography: paper.bib --- # Summary Polyphonic singing is one of the most widespread forms of music-making. During a performance, singers must constantly adjust their pitch to stay in tune with one another — a complex skill that requires extensive practice. Research has shown that pitch monitoring tools can assist singers in fine-tuning their intonation during a performance [@BerglinPD22_VisualFeedback_JPM]. Specifically, real-time visualizations of the fundamental frequency (F0), which represents the pitch of the singing voice, help singers assess their pitch relative to a fixed reference or other voices. To support the monitoring of polyphonic singing performances, we developed `pytch`, an interactive Python tool with a graphical user interface (GUI) designed to record, process, and visualize multiple voices in real time. The GUI displays vocal spectra and estimated F0 trajectories for all singers, as well as the harmonic intervals between them. Additionally, users can adjust visual and algorithmic parameters interactively to accommodate different input devices, microphone signals, singing styles and use cases. Written in Python, `pytch` utilizes the `libf0-realtime` library [@MeierSM25_RealTimeF0_ISMIR] for real-time F0 estimation and `pyqtgraph`[^1] for efficient visualizations of the analysis results. 
+Our tool builds upon a late-breaking demo in [@KriegerowskiS_Pytch_2017] - which we refer to as version 1. Since then, the tool has been significantly extended with a new real-time graphics engine, a modular audio processing backend that facilitates the integration of additional algorithms, and improved support for a wider range of platforms and recording hardware - which we refer to as version 2. Over its seven years of development, `pytch` has been tested and refined through use in several rehearsals, workshops, and field studies — including Sardinian quartet singing (see demo video [^2]) and traditional Georgian singing (see demo video [^3]). [^1]: +[^2]: +[^3]: + # Statement of Need -Software that assesses the pitch of a singing voice in real time is mostly known from Karaoke singing applications, such as Let's Sing[^2], Rock Band[^3], or Cantamus[^4]. These tools typically compare the singer’s pitch to a score reference to judge whether notes are ‘correct’ or ‘incorrect’. However, such applications face several limitations when applied to polyphonic or group singing contexts. Most notably, many Karaoke systems can only process one or two singing voices at a time, which is problematic for monitoring group performances. Additionally, software that relies on a score as a reference poses challenges for a cappella performances, where singers may drift together in pitch over time while maintaining relative harmony, or in orally-transmitted traditions that may lack a formal score altogether [@ScherbaumMRM19_MultimediaRecordings_FMA]. Finally, existing open-source research software for singing voice processing, like Praat [@Boersma01_Praat_GI], Sonic Visualiser [@CannamLS10_SonicVisualizer_ICMC], and Tarsos [@SixCL13_Tarsos_JNMR], lack real-time feedback, preventing an effective feedback loop between singers and their tool. +Software that assesses the pitch of a singing voice in real time is mostly known from Karaoke singing applications, such as Let's Sing[^2], Rock Band[^3], or Cantamus[^4]. These tools typically compare the singer’s pitch to a score reference to judge whether notes are ‘correct’ or ‘incorrect’. However, such applications face several limitations when applied to polyphonic or group singing contexts. Most notably, many Karaoke systems can only process one or two singing voices at a time, which is problematic for monitoring group performances. Additionally, software that relies on a score as a reference poses challenges for a cappella performances, where singers may drift together in pitch over time while maintaining relative harmony, or in orally-transmitted traditions that may lack a formal score altogether. Finally, existing open-source research software for singing voice processing, like Praat [@Boersma01_Praat_GI], Sonic Visualiser [@CannamLS10_SonicVisualizer_ICMC], and Tarsos [@SixCL13_Tarsos_JNMR], lack real-time feedback, preventing an effective feedback loop between singers and their tool. -To address these challenges, we developed `pytch`, a tool that enables singers and conductors to analyze and improve various aspects of a performance during rehearsals. For example, the vocal spectra can help singers fine-tune the expression of formant frequencies, while melodic and harmonic issues become visible through F0 trajectories and harmonic intervals. Unlike many existing tools, `pytch` does not require a musical score, making it well-suited for a wide variety of singing traditions, including a cappella and orally transmitted genres. 
+To address these challenges, we developed `pytch`. Our tool is currently the only software that enables singers and conductors to monitor and train harmonic interval singing in real time — a skill that is essential in many singing traditions. This includes not only polyphonic genres such as traditional Georgian vocal music [@ScherbaumMRM19_MultimediaRecordings_FMA] or Barbershop singing [@HagermanS80_Barbershop_CITESEER], where precise tuning between voices is stylistically central, but also the practice of non-tempered tuning systems found in various oral traditions. In particular, the vocal spectra can help singers fine-tune the expression of formant frequencies, while melodic and harmonic issues become visible through F0 trajectories and harmonic intervals. Unlike many existing tools, `pytch` does not require a musical score, making it well-suited for rehearsals, ethnomusicological research and pedagogical contexts focused on intonation and harmonic listening. In addition to its practical applications, `pytch` also provides a flexible platform for music information retrieval (MIR) research on real-time audio processing. Working with real-time data introduces challenges such as a limited audio context for analysis and strict timing constraints to ensure low-latency processing. Researchers can use `pytch` to develop, test, and compare algorithms for tasks like F0 estimation and signal enhancement [@MeierCM24_RealTimePLP_TISMIR]. -[^2]: -[^3]: -[^4]: +[^4]: +[^5]: +[^6]: -# Audio Processing + +# Multitrack Singing Recordings + +To fully leverage the capabilities of `pytch`, it is essential to record each singer with an individual microphone. In contrast, stereo recordings—such as those captured by a room microphone placed in front of the ensemble—often suffer from overlapping signals, making it difficult to analyze individual voices. Suitable multitrack recordings can be obtained using handheld dynamic microphones or headset microphones, both of which provide good audio quality. However, these setups are prone to cross-talk, especially when singers are positioned close together. + +One way to reduce cross-talk is to increase the physical distance between singers or to record them in isolation. However, this is not always feasible, as singers need to hear one another to maintain accurate tuning. An effective workaround is the use of contact microphones, such as throat microphones, which capture vocal fold vibrations directly from the skin of the throat. This method offers a significant advantage: the recorded signals are largely immune to interference from other singers, resulting in much cleaner, more isolated recordings [@Scherbaum16_LarynxMicrophones_IWFMA]. + + +# Audio Processing The real-time audio processing pipeline implemented in the file `audio.py` is the heart of `pytch` and consists of two main stages: recording and analysis. The recording stage records multichannel audio waveforms from the soundcard or an external audio interface using the `sounddevice` library. The library is based on PortAudio and supports a wide range of operating systems, audio devices, and sampling rates. The recorded audio is received in chunks via a recording callback and fed into a ring buffer shared with the analysis process. When the buffer is sufficiently filled with audio chunks, the analysis process reads the recorded audio to compute several audio features. 
-For each channel, the analysis stage computes the audio level in dBFS, a time-frequency representation of the audio signal via the Short-Time Fourier Transform (see [@Mueller21_FMP_SPRINGER] for fundamentals of music processing), and an estimate of the F0 along with a confidence value using the `libf0-realtime` library [@MeierSM25_RealTimeF0_ISMIR]. The library includes several real-time implementations of well-known F0 estimation algorithms, such as YIN [@CheveigneK02_YIN_JASA] and SWIPE. YIN is a time-domain algorithm that computes the F0 based on a tweaked auto-correlation function. It is computationally efficient and well-suited for low-latency applications, but it tends to suffer from estimation errors, particularly confusions with higher harmonics such as the octave, for noisy signals. In contrast, SWIPE is a frequency-domain algorithm that estimates the F0 by matching different spectral representations of the audio with sawtooth-like kernels. While more computationally demanding, SWIPE typically yields more reliable estimates, in particular for vocal input signals. `pytch` allows users to choose between these algorithms depending on their specific needs and system capabilities. The obtained F0 estimates, which are natively computed in the unit Hz are converted to the unit cents using a user-specified reference frequency. Depending on the audio quality and vocal characteristics, F0 estimates may exhibit artifacts such as discontinuities or pitch slides, which can make the resulting trajectories difficult to interpret [@RosenzweigSM19_StableF0_ISMIR]. Previous research has shown that using throat microphones can improve the isolation of individual voices in group singing contexts, resulting in cleaner signals and more accurate F0 estimates [@Scherbaum16_LarynxMicrophones_IWFMA]. To further enhance interpretability, `pytch` includes several optional post-processing steps: a confidence threshold to discard estimates with low confidence score, a median filter to smooth the trajectories, and a gradient filter to suppress abrupt pitch slides. As a final step in the audio analysis, the harmonic intervals between the F0 trajectories are computed. Every audio feature is stored separately in a dedicated ring buffer. After processing, the pipeline sets a flag that notifies the GUI that new data is ready for visualization. +For each channel, the analysis stage computes the audio level in dBFS, a time-frequency representation of the audio signal via the Short-Time Fourier Transform (see [@Mueller21_FMP_SPRINGER] for fundamentals of music processing), and an estimate of the F0 along with a confidence value using the `libf0-realtime` library [@MeierSM25_RealTimeF0_ISMIR]. The library includes several real-time implementations of well-known F0 estimation algorithms, such as YIN [@CheveigneK02_YIN_JASA] and SWIPE. YIN is a time-domain algorithm that computes the F0 based on a tweaked auto-correlation function. It is computationally efficient and well-suited for low-latency applications, but it tends to suffer from estimation errors, particularly confusions with higher harmonics such as the octave. In contrast, SWIPE is a frequency-domain algorithm that estimates the F0 by matching different spectral representations of the audio with sawtooth-like kernels. While more computationally demanding, SWIPE typically yields more reliable estimates, in particular for vocal input signals. `pytch` allows users to choose between these algorithms depending on their specific needs and system capabilities. 
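The optional post-processing of the trajectories and the interval computation described in the following sentences could be sketched roughly as below; the parameter names and default values are illustrative and do not correspond to `pytch`'s actual settings:

```python
import numpy as np
from scipy.signal import medfilt

def clean_trajectory(f0_cents, confidence, conf_threshold=0.5,
                     median_len=5, max_jump_cents=50.0):
    """Illustrative cleaning of one F0 trajectory given in cents.

    The trajectory is median-smoothed (kernel length must be odd), frames
    whose frame-to-frame change exceeds `max_jump_cents` (pitch slides) are
    masked, and frames with low confidence are discarded (set to NaN).
    """
    f0 = np.asarray(f0_cents, dtype=float)
    f0 = medfilt(f0, kernel_size=median_len)               # median smoothing
    jump = np.abs(np.diff(f0, prepend=f0[0]))
    f0[jump > max_jump_cents] = np.nan                      # gradient filter
    f0[np.asarray(confidence) < conf_threshold] = np.nan    # confidence gate
    return f0

def harmonic_interval(f0_cents_upper, f0_cents_lower):
    """Harmonic interval between two voices in cents (upper minus lower)."""
    return np.asarray(f0_cents_upper, dtype=float) - np.asarray(f0_cents_lower, dtype=float)
```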
The obtained F0 estimates, which are natively computed in the unit Hz are converted to the unit cents using a user-specified reference frequency. Depending on the audio quality and vocal characteristics, F0 estimates may exhibit artifacts such as discontinuities or pitch slides, which can make the resulting trajectories difficult to interpret [@RosenzweigSM19_StableF0_ISMIR]. Previous research has shown that using throat microphones can improve the isolation of individual voices in group singing contexts, resulting in cleaner signals and more accurate F0 estimates [@Scherbaum16_LarynxMicrophones_IWFMA]. To further enhance interpretability, `pytch` includes several optional post-processing steps: a confidence threshold to discard estimates with low confidence score, a median filter to smooth the trajectories, and a gradient filter to suppress abrupt pitch slides. As a final step in the audio analysis, the harmonic intervals between the F0 trajectories are computed. Every audio feature is stored separately in a dedicated ring buffer. After processing, the pipeline sets a flag that notifies the GUI that new data is ready for visualization. # Graphical User Interface (GUI) - In this section, we provide a step-by-step explanation of the `pytch` GUI implemented in the file `gui.py`. Right after the program start, a startup menu opens in which the user is asked to specify the soundcard, input channels, sampling rate, and window size for processing. Furthermore, the user can choose to store the recorded audio and the F0 trajectories on disk. These configuration choices are required to initialize the audio processing module and the main GUI which is loaded when the user clicks "ok". While there is no hard limit on the number of channels, we recommend to use up to four input channels to ensure visibility of the charts and responsiveness of the GUI. A screenshot of the main GUI is shown in Figure \autoref{fig:GUI}. ![`pytch` GUI monitoring three singing voices.\label{fig:GUI}](../pictures/screenshot.png){ width=90% } @@ -79,7 +78,6 @@ The main GUI is organized into three horizontal sections. On the left, a control The right section, referred to as the "trajectory view," provides time-based visualizations of either the F0 trajectories ("pitches" tab) or the harmonic intervals between voices ("differential" tab) with a 10 second time context. Using the controls in the left-side menu, the user can select the F0 estimation algorithm and improve the real-time visualization by adjusting the confidence threshold, the median filter length for smoothing and the tolerance of the gradient filter. F0 and interval trajectories can be displayed with respect to a fixed reference frequency or a dynamic one derived from a selected channel, the lowest, or highest detected voice. Axis limits for this section can also be manually set. # Acknowledgements -**DFG? Uni Potsdam?** -We would like to thank all the singers who contributed to testing `pytch` during its development. The International Audio Laboratories Erlangen are a joint institution of the Friedrich-Alexander-Universität Erlangen-Nürnberg (FAU) and Fraunhofer Institute for Integrated Circuits IIS. +We would like to thank all the singers who contributed to testing `pytch` during its development. 
# References From a335cdee6305ca4c805655762d885040246fed17 Mon Sep 17 00:00:00 2001 From: sebastianrosenzweig Date: Sun, 25 May 2025 22:11:59 +0200 Subject: [PATCH 14/27] literature tidy-up --- paper/paper.bib | 172 +++++++++--------------------------------------- paper/paper.md | 12 ++-- 2 files changed, 37 insertions(+), 147 deletions(-) diff --git a/paper/paper.bib b/paper/paper.bib index 6b6e924..de2a77e 100644 --- a/paper/paper.bib +++ b/paper/paper.bib @@ -55,6 +55,20 @@ @book{Mueller21_FMP_SPRINGER url-details = {http://www.music-processing.de} } +@article{RosenzweigCWSGM20_DCS_TISMIR, + author = {Sebastian Rosenzweig and Helena Cuesta and Christof Wei{\ss} and Frank Scherbaum and Emilia G{\'o}mez and Meinard M{\"u}ller}, + title = {{D}agstuhl {ChoirSet}: {A} Multitrack Dataset for {MIR} Research on Choral Singing}, + journal = {Transactions of the International Society for Music Information Retrieval ({TISMIR})}, + volume = {3}, + number = {1}, + year = {2020}, + pages = {98--110}, + publisher = {Ubiquity Press}, + doi = {10.5334/tismir.48}, + url-pdf = {2020_RosenzweigCWSGM_DagstuhlChoirSet_TISMIR_ePrint.pdf}, + url-demo = {https://www.audiolabs-erlangen.de/resources/MIR/2020-DagstuhlChoirSet} +} + @inproceedings{ScherbaumMRM19_MultimediaRecordings_FMA, author = {Frank Scherbaum and Nana Mzhavanadze and Sebastian Rosenzweig and Meinard M{\"u}ller}, title = {Multi-media recordings of traditional {G}eorgian vocal music for computational analysis}, @@ -111,77 +125,6 @@ @inproceedings{CannamLS10_SonicVisualizer_ICMC year = {2010}, } -@article{Boersma01_Praat_GI, - author = {Paul Boersma}, - journal = {Glot International}, - number = {9/10}, - pages = {341--345}, - title = {{Praat}, a system for doing phonetics by computer}, - volume = {5}, - year = {2001} -} - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -@article{CheveigneK02_YIN_JASA, - author = {Alain de Cheveign{\'e} and Hideki Kawahara}, - title = {{YIN}, a fundamental frequency estimator for speech and music.}, - journal = {Journal of the Acoustical Society of America (JASA)}, - year = {2002}, - volume = {111}, - pages = {1917--1930}, - number = {4}, -} - -@inproceedings{MauchD14_pYIN_ICASSP, - author = {Matthias Mauch and Simon Dixon}, - title = {{pYIN}: A Fundamental Frequency Estimator Using Probabilistic Threshold Distributions}, - booktitle = {{IEEE} International Conference on Acoustics, Speech and Signal Processing ({ICASSP})}, - year = {2014}, - address = {Florence, Italy}, - pages = {659--663}, -} - -@article{SalamonG12_MelodyExtraction_TASLP, - Author = {Justin Salamon and Emilia G{\'o}mez}, - Title = {Melody Extraction from Polyphonic Music Signals using Pitch Contour Characteristics}, - Journal = {IEEE Transactions on Audio, Speech, and Language Processing}, - Number = {6}, - Volume = {20}, - Pages = {1759--1770}, - Year = {2012}, - doi = {10.1109/TASL.2012.2188515} -} - @article{CamachoH08_SawtoothWaveform_JASA, author = {Arturo Camacho and John G. Harris}, title = {A sawtooth waveform inspired pitch estimator for speech and music}, @@ -193,79 +136,24 @@ @article{CamachoH08_SawtoothWaveform_JASA pages = {1638--1652}, } -@inproceedings{BittnerFRJCK19_mirdata_ISMIR, - author = {Rachel M. 
Bittner and Magdalena Fuentes and David Rubinstein and Andreas Jansson and Keunwoo Choi and Thor Kell}, - title = {{mirdata}: Software for Reproducible Usage of Datasets}, - booktitle = {Proceedings of the International Society for Music Information Retrieval Conference ({ISMIR})}, - pages = {99--106}, - year = {2019}, - address = {Delft, The Netherlands}, - url = {http://archives.ismir.net/ismir2019/paper/000009.pdf} -} - -@inproceedings{RaffelMHSNLE14_MirEval_ISMIR, - author = {Colin Raffel and Brian McFee and Eric J. Humphrey and Justin Salamon and Oriol Nieto and Dawen Liang and Daniel P. W. Ellis}, - title = {{MIR{\_}EVAL}: {A} Transparent Implementation of Common {MIR} Metrics}, - pages = {367--372}, - booktitle = {Proceedings of the International Society for Music Information Retrieval Conference ({ISMIR})}, - address = {Taipei, Taiwan}, - year = {2014}, -} - -@article{RosenzweigCWSGM20_DCS_TISMIR, - author = {Sebastian Rosenzweig and Helena Cuesta and Christof Wei{\ss} and Frank Scherbaum and Emilia G{\'o}mez and Meinard M{\"u}ller}, - title = {{D}agstuhl {ChoirSet}: {A} Multitrack Dataset for {MIR} Research on Choral Singing}, - journal = {Transactions of the International Society for Music Information Retrieval ({TISMIR})}, - volume = {3}, - number = {1}, - year = {2020}, - pages = {98--110}, - publisher = {Ubiquity Press}, - doi = {10.5334/tismir.48}, - url-pdf = {2020_RosenzweigCWSGM_DagstuhlChoirSet_TISMIR_ePrint.pdf}, - url-demo = {https://www.audiolabs-erlangen.de/resources/MIR/2020-DagstuhlChoirSet} -} - -@inproceedings{BittnerSBB17_PitchContours_AES, - author = {Rachel M. Bittner and Justin Salamon and Juan J. Bosch and Juan Pablo Bello}, - title = {Pitch Contours as a Mid-Level Representation for Music Informatics}, - booktitle = {Proceedings of the {AES} International Conference on Semantic Audio}, - address = {Erlangen, Germany}, - pages = {100--107}, - year = {2017}, - url = {http://www.aes.org/e-lib/browse.cfm?elib=18756} -} - -@inproceedings{RosenzweigSM21_F0Reliability_ICASSP, - author = {Sebastian Rosenzweig and Frank Scherbaum and Meinard M{\"u}ller}, - title = {Reliability Assessment of Singing Voice {F0}-Estimates Using Multiple Algorithms}, - booktitle = {Proceedings of the {IEEE} International Conference on Acoustics, Speech, and Signal Processing ({ICASSP})}, - pages = {261--265}, - address = {Toronto, Canada}, - year = {2021}, - doi = {10.1109/ICASSP39728.2021.9413372} -} - -@book{Mueller21_FMP_SPRINGER, - author = {Meinard M\"{u}ller}, - title = {Fundamentals of Music Processing -- Using Python and Jupyter Notebooks}, - type = {Monograph}, - year = {2021}, - isbn = {978-3-030-69807-2}, - publisher = {Springer Verlag}, - edition = {2nd}, - pages = {1--495}, - doi = {10.1007/978-3-030-69808-9}, - url-details = {http://www.music-processing.de} +@article{CheveigneK02_YIN_JASA, + author = {Alain de Cheveign{\'e} and Hideki Kawahara}, + title = {{YIN}, a fundamental frequency estimator for speech and music.}, + journal = {Journal of the Acoustical Society of America (JASA)}, + year = {2002}, + volume = {111}, + pages = {1917--1930}, + number = {4}, } -@article{Scherbaum16_LarynxMicrophones_IWFMA, - author = {Frank Scherbaum}, - title = {On the Benefit of Larynx-Microphone Field Recordings for the Documentation and Analysis of Polyphonic Vocal Music}, - journal = {Proceedings of the International Workshop Folk Music Analysis}, - pages = {80--87}, - address = {Dublin,Ireland}, - year = {2016} +@article{Boersma01_Praat_GI, + author = {Paul Boersma}, + journal = 
{Glot International}, + number = {9/10}, + pages = {341--345}, + title = {{Praat}, a system for doing phonetics by computer}, + volume = {5}, + year = {2001} } @book{HagermanS80_Barbershop_CITESEER, diff --git a/paper/paper.md b/paper/paper.md index 6f240a6..238cdf3 100644 --- a/paper/paper.md +++ b/paper/paper.md @@ -9,10 +9,12 @@ tags: authors: - name: Sebastian Rosenzweig orcid: 0000-0003-4964-9217 + equal-contrib: true corresponding: true affiliation: 1 - name: Marius Kriegerowski orcid: + equal-contrib: true corresponding: false affiliation: 2 - name: Frank Scherbaum @@ -35,7 +37,7 @@ bibliography: paper.bib # Summary Polyphonic singing is one of the most widespread forms of music-making. During a performance, singers must constantly adjust their pitch to stay in tune with one another — a complex skill that requires extensive practice. Research has shown that pitch monitoring tools can assist singers in fine-tuning their intonation during a performance [@BerglinPD22_VisualFeedback_JPM]. Specifically, real-time visualizations of the fundamental frequency (F0), which represents the pitch of the singing voice, help singers assess their pitch relative to a fixed reference or other voices. To support the monitoring of polyphonic singing performances, we developed `pytch`, an interactive Python tool with a graphical user interface (GUI) designed to record, process, and visualize multiple voices in real time. The GUI displays vocal spectra and estimated F0 trajectories for all singers, as well as the harmonic intervals between them. Additionally, users can adjust visual and algorithmic parameters interactively to accommodate different input devices, microphone signals, singing styles and use cases. Written in Python, `pytch` utilizes the `libf0-realtime` library [@MeierSM25_RealTimeF0_ISMIR] for real-time F0 estimation and `pyqtgraph`[^1] for efficient visualizations of the analysis results. -Our tool builds upon a late-breaking demo in [@KriegerowskiS_Pytch_2017] - which we refer to as version 1. Since then, the tool has been significantly extended with a new real-time graphics engine, a modular audio processing backend that facilitates the integration of additional algorithms, and improved support for a wider range of platforms and recording hardware - which we refer to as version 2. Over its seven years of development, `pytch` has been tested and refined through use in several rehearsals, workshops, and field studies — including Sardinian quartet singing (see demo video [^2]) and traditional Georgian singing (see demo video [^3]). +Our tool builds upon a late-breaking demo in [@KriegerowskiS_Pytch_2017] - which we refer to as version 1. Since then, the tool has been significantly extended with a new real-time graphics engine, a modular audio processing backend that facilitates the integration of additional algorithms, and improved support for a wider range of platforms and recording hardware - which we refer to as version 2. Over its seven years of development, `pytch` has been tested and refined through use in several rehearsals, workshops, and field studies — including Sardinian quartet singing (see demo video[^2]) and traditional Georgian singing (see demo video[^3]). [^1]: [^2]: @@ -57,19 +59,19 @@ In addition to its practical applications, `pytch` also provides a flexible plat # Multitrack Singing Recordings -To fully leverage the capabilities of `pytch`, it is essential to record each singer with an individual microphone. 
In contrast, stereo recordings—such as those captured by a room microphone placed in front of the ensemble—often suffer from overlapping signals, making it difficult to analyze individual voices. Suitable multitrack recordings can be obtained using handheld dynamic microphones or headset microphones, both of which provide good audio quality. However, these setups are prone to cross-talk, especially when singers are positioned close together. +To fully leverage the capabilities of `pytch`, it is essential to record each singer with an individual microphone. Stereo recordings—such as those captured by a room microphone placed in front of the ensemble—often suffer from overlapping signals, making it difficult to analyze individual voices. While there is no hard limit on the number of channels, we recommend to record up to four individual singers to ensure visibility of the charts and responsiveness of the GUI. Suitable multitrack recordings can be obtained using handheld dynamic microphones or headset microphones. However, these setups are prone to cross-talk, especially when singers are positioned close together. -One way to reduce cross-talk is to increase the physical distance between singers or to record them in isolation. However, this is not always feasible, as singers need to hear one another to maintain accurate tuning. An effective workaround is the use of contact microphones, such as throat microphones, which capture vocal fold vibrations directly from the skin of the throat. This method offers a significant advantage: the recorded signals are largely immune to interference from other singers, resulting in much cleaner, more isolated recordings [@Scherbaum16_LarynxMicrophones_IWFMA]. +One way to reduce cross-talk is to increase the physical distance between singers or to record them in isolation. However, this is not always feasible, as singers need to hear one another to maintain accurate tuning. An effective workaround is the use of contact microphones, such as throat microphones, which capture vocal fold vibrations directly from the skin of the throat. This method offers a significant advantage: the recorded signals are largely immune to interference from other singers, resulting in much cleaner, more isolated recordings. Throat microphones have successfully been used to record vocal ensembles in several past studies [@Scherbaum16_LarynxMicrophones_IWFMA]. # Audio Processing The real-time audio processing pipeline implemented in the file `audio.py` is the heart of `pytch` and consists of two main stages: recording and analysis. The recording stage records multichannel audio waveforms from the soundcard or an external audio interface using the `sounddevice` library. The library is based on PortAudio and supports a wide range of operating systems, audio devices, and sampling rates. The recorded audio is received in chunks via a recording callback and fed into a ring buffer shared with the analysis process. When the buffer is sufficiently filled with audio chunks, the analysis process reads the recorded audio to compute several audio features. -For each channel, the analysis stage computes the audio level in dBFS, a time-frequency representation of the audio signal via the Short-Time Fourier Transform (see [@Mueller21_FMP_SPRINGER] for fundamentals of music processing), and an estimate of the F0 along with a confidence value using the `libf0-realtime` library [@MeierSM25_RealTimeF0_ISMIR]. 
The library includes several real-time implementations of well-known F0 estimation algorithms, such as YIN [@CheveigneK02_YIN_JASA] and SWIPE. YIN is a time-domain algorithm that computes the F0 based on a tweaked auto-correlation function. It is computationally efficient and well-suited for low-latency applications, but it tends to suffer from estimation errors, particularly confusions with higher harmonics such as the octave. In contrast, SWIPE is a frequency-domain algorithm that estimates the F0 by matching different spectral representations of the audio with sawtooth-like kernels. While more computationally demanding, SWIPE typically yields more reliable estimates, in particular for vocal input signals. `pytch` allows users to choose between these algorithms depending on their specific needs and system capabilities. The obtained F0 estimates, which are natively computed in the unit Hz are converted to the unit cents using a user-specified reference frequency. Depending on the audio quality and vocal characteristics, F0 estimates may exhibit artifacts such as discontinuities or pitch slides, which can make the resulting trajectories difficult to interpret [@RosenzweigSM19_StableF0_ISMIR]. Previous research has shown that using throat microphones can improve the isolation of individual voices in group singing contexts, resulting in cleaner signals and more accurate F0 estimates [@Scherbaum16_LarynxMicrophones_IWFMA]. To further enhance interpretability, `pytch` includes several optional post-processing steps: a confidence threshold to discard estimates with low confidence score, a median filter to smooth the trajectories, and a gradient filter to suppress abrupt pitch slides. As a final step in the audio analysis, the harmonic intervals between the F0 trajectories are computed. Every audio feature is stored separately in a dedicated ring buffer. After processing, the pipeline sets a flag that notifies the GUI that new data is ready for visualization. +For each channel, the analysis stage computes the audio level in dBFS, a time-frequency representation of the audio signal via the Short-Time Fourier Transform (see [@Mueller21_FMP_SPRINGER] for fundamentals of music processing), and an estimate of the F0 along with a confidence value using the `libf0-realtime` library [@MeierSM25_RealTimeF0_ISMIR]. The library includes several real-time implementations of well-known F0 estimation algorithms, such as YIN [@CheveigneK02_YIN_JASA] and SWIPE [@CamachoH08_SawtoothWaveform_JASA]. YIN is a time-domain algorithm that computes the F0 based on a tweaked auto-correlation function. It is computationally efficient and well-suited for low-latency applications, but it tends to suffer from estimation errors, particularly confusions with higher harmonics such as the octave. In contrast, SWIPE is a frequency-domain algorithm that estimates the F0 by matching different spectral representations of the audio with sawtooth-like kernels. While more computationally demanding, SWIPE typically yields more reliable estimates, in particular for vocal input signals. `pytch` allows users to choose between these algorithms depending on their specific needs and system capabilities. The obtained F0 estimates, which are natively computed in the unit Hz are converted to the unit cents using a user-specified reference frequency. 
Depending on the audio quality and vocal characteristics, F0 estimates may exhibit artifacts such as discontinuities or pitch slides, which can make the resulting trajectories difficult to interpret [@RosenzweigSM19_StableF0_ISMIR]. Previous research has shown that using throat microphones can improve the isolation of individual voices in group singing contexts, resulting in cleaner signals and more accurate F0 estimates [@Scherbaum16_LarynxMicrophones_IWFMA]. To further enhance interpretability, `pytch` includes several optional post-processing steps: a confidence threshold to discard estimates with low confidence score, a median filter to smooth the trajectories, and a gradient filter to suppress abrupt pitch slides. As a final step in the audio analysis, the harmonic intervals between the F0 trajectories are computed. Every audio feature is stored separately in a dedicated ring buffer. After processing, the pipeline sets a flag that notifies the GUI that new data is ready for visualization. # Graphical User Interface (GUI) -In this section, we provide a step-by-step explanation of the `pytch` GUI implemented in the file `gui.py`. Right after the program start, a startup menu opens in which the user is asked to specify the soundcard, input channels, sampling rate, and window size for processing. Furthermore, the user can choose to store the recorded audio and the F0 trajectories on disk. These configuration choices are required to initialize the audio processing module and the main GUI which is loaded when the user clicks "ok". While there is no hard limit on the number of channels, we recommend to use up to four input channels to ensure visibility of the charts and responsiveness of the GUI. A screenshot of the main GUI is shown in Figure \autoref{fig:GUI}. +In this section, we provide a step-by-step explanation of the `pytch` GUI implemented in the file `gui.py`. Right after the program start, a startup menu opens in which the user is asked to specify the soundcard, input channels, sampling rate, and window size for processing. Furthermore, the user can choose to store the recorded audio and the F0 trajectories on disk. These configuration choices are required to initialize the audio processing module and the main GUI which is loaded when the user clicks "ok". A screenshot of the main GUI which opens after successful initialization is shown in Figure \autoref{fig:GUI}. 
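As a minimal illustration of the cents conversion and the harmonic intervals described in the Audio Processing section above, the following Python sketch converts two F0 trajectories from Hz to cents relative to a user-specified reference frequency and subtracts them to obtain the interval between the voices. The array values, the reference frequency, and the function name are illustrative and do not correspond to `pytch`'s internal API.

```python
import numpy as np

def hz_to_cents(f0_hz, ref_hz=440.0):
    """Convert F0 values in Hz to cents relative to a reference frequency.
    Unvoiced frames (F0 == 0) are mapped to NaN so they leave gaps when plotted."""
    f0_hz = np.asarray(f0_hz, dtype=float)
    cents = np.full_like(f0_hz, np.nan)
    voiced = f0_hz > 0
    cents[voiced] = 1200.0 * np.log2(f0_hz[voiced] / ref_hz)
    return cents

# Two illustrative F0 trajectories in Hz, e.g. a lower and a higher voice
f0_low = np.array([220.0, 221.5, 0.0, 219.8])
f0_high = np.array([330.0, 329.0, 331.2, 0.0])

ref = 220.0  # user-specified reference frequency in Hz
low_cents = hz_to_cents(f0_low, ref)
high_cents = hz_to_cents(f0_high, ref)

# Harmonic interval between the two voices in cents
# (a perfect fifth corresponds to roughly 702 cents)
interval = high_cents - low_cents
print(interval)
```

Because cents are a logarithmic unit, the interval between two voices is independent of the chosen reference frequency; only the absolute position of each trajectory shifts.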
![`pytch` GUI monitoring three singing voices.\label{fig:GUI}](../pictures/screenshot.png){ width=90% } From 26398c1492be83cc0578828f04da5ecebe734583 Mon Sep 17 00:00:00 2001 From: sebastianrosenzweig Date: Sat, 31 May 2025 16:45:26 +0200 Subject: [PATCH 15/27] feedback --- paper/paper.bib | 7 +++++++ paper/paper.md | 22 +++++++++++----------- 2 files changed, 18 insertions(+), 11 deletions(-) diff --git a/paper/paper.bib b/paper/paper.bib index de2a77e..063b798 100644 --- a/paper/paper.bib +++ b/paper/paper.bib @@ -20,6 +20,13 @@ @article{MeierCM24_RealTimePLP_TISMIR url-demo = {https://audiolabs-erlangen.de/resources/MIR/2024-TISMIR-RealTimePLP} } +@phdthesis{Cuesta22_Multipitch_PhD, + author = {Helena Cuesta}, + year = {2022}, + title = {Data-driven Pitch Content Description of Choral Singing Recordings}, + school = {Universitat Pompeu Fabra, Barcelona, Spain}, +} + @article{BerglinPD22_VisualFeedback_JPM, author = {Jacob Berglin and Peter Q Pfordresher and Steven Demorest}, title = {The effect of visual and auditory feedback on adult poor-pitch remediation}, diff --git a/paper/paper.md b/paper/paper.md index 238cdf3..624c6b3 100644 --- a/paper/paper.md +++ b/paper/paper.md @@ -24,20 +24,20 @@ authors: affiliations: - - name: Audoo Ltd., London, United Kingdom + - name: Independent Researcher, Barcelona, Spain index: 1 - name: Independent Researcher, Berlin, Germany index: 2 - name: University of Potsdam, Potsdam, Germany index: 3 -date: 23 May 2025 +date: 30 May 2025 bibliography: paper.bib --- # Summary Polyphonic singing is one of the most widespread forms of music-making. During a performance, singers must constantly adjust their pitch to stay in tune with one another — a complex skill that requires extensive practice. Research has shown that pitch monitoring tools can assist singers in fine-tuning their intonation during a performance [@BerglinPD22_VisualFeedback_JPM]. Specifically, real-time visualizations of the fundamental frequency (F0), which represents the pitch of the singing voice, help singers assess their pitch relative to a fixed reference or other voices. -To support the monitoring of polyphonic singing performances, we developed `pytch`, an interactive Python tool with a graphical user interface (GUI) designed to record, process, and visualize multiple voices in real time. The GUI displays vocal spectra and estimated F0 trajectories for all singers, as well as the harmonic intervals between them. Additionally, users can adjust visual and algorithmic parameters interactively to accommodate different input devices, microphone signals, singing styles and use cases. Written in Python, `pytch` utilizes the `libf0-realtime` library [@MeierSM25_RealTimeF0_ISMIR] for real-time F0 estimation and `pyqtgraph`[^1] for efficient visualizations of the analysis results. -Our tool builds upon a late-breaking demo in [@KriegerowskiS_Pytch_2017] - which we refer to as version 1. Since then, the tool has been significantly extended with a new real-time graphics engine, a modular audio processing backend that facilitates the integration of additional algorithms, and improved support for a wider range of platforms and recording hardware - which we refer to as version 2. Over its seven years of development, `pytch` has been tested and refined through use in several rehearsals, workshops, and field studies — including Sardinian quartet singing (see demo video[^2]) and traditional Georgian singing (see demo video[^3]). 
+To support the monitoring of polyphonic singing performances, we developed `pytch`, an interactive Python tool with a graphical user interface (GUI) designed to record, process, and visualize multiple voices in real time. The GUI displays vocal spectra and estimated F0 trajectories for all singers, as well as the harmonic intervals between them. Additionally, users can adjust visual and algorithmic parameters interactively to accommodate different input devices, microphone signals, singing styles, and use cases. Written in Python, `pytch` utilizes the `libf0-realtime` library [@MeierSM25_RealTimeF0_ISMIR] for real-time F0 estimation and `pyqtgraph`[^1] for efficient visualizations of the analysis results. +Our tool builds upon a late-breaking demo in [@KriegerowskiS_Pytch_2017], which we refer to as version 1. Since then, the tool has been significantly extended with a new real-time graphics engine, a modular audio processing backend that facilitates the integration of additional algorithms, and improved support for a wider range of platforms and recording hardware, which we refer to as version 2. Over its seven years of development, `pytch` has been tested and refined through use in several rehearsals, workshops, and field studies — including Sardinian quartet singing (see demo video[^2]) and traditional Georgian singing (see demo video[^3]). [^1]: [^2]: @@ -45,9 +45,9 @@ Our tool builds upon a late-breaking demo in [@KriegerowskiS_Pytch_2017] - which # Statement of Need -Software that assesses the pitch of a singing voice in real time is mostly known from Karaoke singing applications, such as Let's Sing[^2], Rock Band[^3], or Cantamus[^4]. These tools typically compare the singer’s pitch to a score reference to judge whether notes are ‘correct’ or ‘incorrect’. However, such applications face several limitations when applied to polyphonic or group singing contexts. Most notably, many Karaoke systems can only process one or two singing voices at a time, which is problematic for monitoring group performances. Additionally, software that relies on a score as a reference poses challenges for a cappella performances, where singers may drift together in pitch over time while maintaining relative harmony, or in orally-transmitted traditions that may lack a formal score altogether. Finally, existing open-source research software for singing voice processing, like Praat [@Boersma01_Praat_GI], Sonic Visualiser [@CannamLS10_SonicVisualizer_ICMC], and Tarsos [@SixCL13_Tarsos_JNMR], lack real-time feedback, preventing an effective feedback loop between singers and their tool. +Software that assesses the pitch of a singing voice in real time is best known from Karaoke singing applications, such as Let's Sing[^2], Rock Band[^3], or Cantamus[^4]. These tools typically compare the singer’s pitch to a score reference to judge whether notes are ‘correct’ or ‘incorrect’. However, such applications face several limitations when applied to polyphonic or group singing contexts. Most notably, many Karaoke systems can only process one or two singing voices at a time, which is problematic for monitoring group performances. Additionally, software that relies on a score as a reference poses challenges for a cappella performances, where singers may drift together in pitch over time while maintaining relative harmony, or in orally-transmitted traditions that may lack a formal score altogether. 
Finally, existing open-source research software for singing voice processing, like Praat [@Boersma01_Praat_GI], Sonic Visualiser [@CannamLS10_SonicVisualizer_ICMC], and Tarsos [@SixCL13_Tarsos_JNMR], lack real-time feedback, preventing an effective feedback loop between singers and their tool. -To address these challenges, we developed `pytch`. Our tool is currently the only software that enables singers and conductors to monitor and train harmonic interval singing in real time — a skill that is essential in many singing traditions. This includes not only polyphonic genres such as traditional Georgian vocal music [@ScherbaumMRM19_MultimediaRecordings_FMA] or Barbershop singing [@HagermanS80_Barbershop_CITESEER], where precise tuning between voices is stylistically central, but also the practice of non-tempered tuning systems found in various oral traditions. In particular, the vocal spectra can help singers fine-tune the expression of formant frequencies, while melodic and harmonic issues become visible through F0 trajectories and harmonic intervals. Unlike many existing tools, `pytch` does not require a musical score, making it well-suited for rehearsals, ethnomusicological research and pedagogical contexts focused on intonation and harmonic listening. +To address these challenges, we developed `pytch`. Our tool is currently the only software that enables singers and conductors to monitor and train harmonic interval singing in real time — a skill that is essential in many vocal traditions. This includes not only polyphonic genres such as traditional Georgian vocal music [@ScherbaumMRM19_MultimediaRecordings_FMA] or Barbershop singing [@HagermanS80_Barbershop_CITESEER], where precise tuning between voices is stylistically central, but also the practice of non-tempered tuning systems found in various oral traditions. In more detail, the vocal spectra can help singers fine-tune the expression of formant frequencies, while melodic and harmonic issues become visible through F0 trajectories and harmonic intervals. Unlike many existing tools, `pytch` does not require a musical score, making it well-suited for rehearsals, ethnomusicological research and pedagogical contexts focused on intonation and harmonic listening. In addition to its practical applications, `pytch` also provides a flexible platform for music information retrieval (MIR) research on real-time audio processing. Working with real-time data introduces challenges such as a limited audio context for analysis and strict timing constraints to ensure low-latency processing. Researchers can use `pytch` to develop, test, and compare algorithms for tasks like F0 estimation and signal enhancement [@MeierCM24_RealTimePLP_TISMIR]. @@ -59,25 +59,25 @@ In addition to its practical applications, `pytch` also provides a flexible plat # Multitrack Singing Recordings -To fully leverage the capabilities of `pytch`, it is essential to record each singer with an individual microphone. Stereo recordings—such as those captured by a room microphone placed in front of the ensemble—often suffer from overlapping signals, making it difficult to analyze individual voices. While there is no hard limit on the number of channels, we recommend to record up to four individual singers to ensure visibility of the charts and responsiveness of the GUI. Suitable multitrack recordings can be obtained using handheld dynamic microphones or headset microphones. However, these setups are prone to cross-talk, especially when singers are positioned close together. 
+To fully leverage the capabilities of `pytch`, it is essential to record each singer with an individual microphone. While there is no hard limit on the number of input channels, we recommend recording up to four individual singers to ensure visibility of the charts and responsiveness of the GUI. Stereo recordings—-such as those captured by a room microphone placed in front of the ensemble--are not suitable for the analysis with `pytch`, because contributions of individual voices are difficult to identify from polyphonic mixtures [@Cuesta22_Multipitch_PhD]. Suitable multitrack recordings can be obtained using handheld dynamic microphones or headset microphones. However, these setups are prone to cross-talk, especially when singers are positioned close together. One way to reduce cross-talk is to increase the physical distance between singers or to record them in isolation. However, this is not always feasible, as singers need to hear one another to maintain accurate tuning. An effective workaround is the use of contact microphones, such as throat microphones, which capture vocal fold vibrations directly from the skin of the throat. This method offers a significant advantage: the recorded signals are largely immune to interference from other singers, resulting in much cleaner, more isolated recordings. Throat microphones have successfully been used to record vocal ensembles in several past studies [@Scherbaum16_LarynxMicrophones_IWFMA]. # Audio Processing -The real-time audio processing pipeline implemented in the file `audio.py` is the heart of `pytch` and consists of two main stages: recording and analysis. The recording stage records multichannel audio waveforms from the soundcard or an external audio interface using the `sounddevice` library. The library is based on PortAudio and supports a wide range of operating systems, audio devices, and sampling rates. The recorded audio is received in chunks via a recording callback and fed into a ring buffer shared with the analysis process. When the buffer is sufficiently filled with audio chunks, the analysis process reads the recorded audio to compute several audio features. +The real-time audio processing pipeline implemented in the file `audio.py` is the heart of `pytch` and consists of two main stages: recording and analysis. The recording stage captures multichannel audio waveforms from the soundcard or an external audio interface using the `sounddevice` library. The library is based on PortAudio and supports a wide range of operating systems, audio devices, and sampling rates. The recorded audio is received in chunks via a recording callback and fed into a ring buffer shared with the analysis process. When the buffer is sufficiently filled with audio chunks, the analysis process reads the recorded audio to compute several audio features. -For each channel, the analysis stage computes the audio level in dBFS, a time-frequency representation of the audio signal via the Short-Time Fourier Transform (see [@Mueller21_FMP_SPRINGER] for fundamentals of music processing), and an estimate of the F0 along with a confidence value using the `libf0-realtime` library [@MeierSM25_RealTimeF0_ISMIR]. The library includes several real-time implementations of well-known F0 estimation algorithms, such as YIN [@CheveigneK02_YIN_JASA] and SWIPE [@CamachoH08_SawtoothWaveform_JASA]. YIN is a time-domain algorithm that computes the F0 based on a tweaked auto-correlation function. 
It is computationally efficient and well-suited for low-latency applications, but it tends to suffer from estimation errors, particularly confusions with higher harmonics such as the octave. In contrast, SWIPE is a frequency-domain algorithm that estimates the F0 by matching different spectral representations of the audio with sawtooth-like kernels. While more computationally demanding, SWIPE typically yields more reliable estimates, in particular for vocal input signals. `pytch` allows users to choose between these algorithms depending on their specific needs and system capabilities. The obtained F0 estimates, which are natively computed in the unit Hz are converted to the unit cents using a user-specified reference frequency. Depending on the audio quality and vocal characteristics, F0 estimates may exhibit artifacts such as discontinuities or pitch slides, which can make the resulting trajectories difficult to interpret [@RosenzweigSM19_StableF0_ISMIR]. Previous research has shown that using throat microphones can improve the isolation of individual voices in group singing contexts, resulting in cleaner signals and more accurate F0 estimates [@Scherbaum16_LarynxMicrophones_IWFMA]. To further enhance interpretability, `pytch` includes several optional post-processing steps: a confidence threshold to discard estimates with low confidence score, a median filter to smooth the trajectories, and a gradient filter to suppress abrupt pitch slides. As a final step in the audio analysis, the harmonic intervals between the F0 trajectories are computed. Every audio feature is stored separately in a dedicated ring buffer. After processing, the pipeline sets a flag that notifies the GUI that new data is ready for visualization. +For each channel, the analysis stage computes the audio level in dBFS, a time--frequency representation of the audio signal via the Short-Time Fourier Transform (see [@Mueller21_FMP_SPRINGER] for fundamentals of music processing), and an estimate of the F0 along with a confidence value, using the `libf0-realtime` library [@MeierSM25_RealTimeF0_ISMIR]. The library includes several real-time implementations of well-known F0 estimation algorithms, such as YIN [@CheveigneK02_YIN_JASA] and SWIPE [@CamachoH08_SawtoothWaveform_JASA]. YIN is a time-domain algorithm that computes the F0 based on a tweaked auto-correlation function. It is computationally efficient and well-suited for low-latency applications, but it tends to suffer from estimation errors, particularly confusions with higher harmonics such as the octave. In contrast, SWIPE is a frequency-domain algorithm that estimates the F0 by matching different spectral representations of the audio with sawtooth-like kernels. While more computationally demanding, SWIPE typically yields more reliable estimates, in particular for vocal input signals. `pytch` allows users to choose between these algorithms depending on their specific needs and system capabilities. The obtained F0 estimates, which are natively computed in the unit Hz, are converted to the unit cents using a user-specified reference frequency. Depending on the audio quality and vocal characteristics, F0 estimates may exhibit artifacts such as discontinuities or pitch slides, which can make the resulting trajectories difficult to interpret [@RosenzweigSM19_StableF0_ISMIR]. 
Previous research has shown that using throat microphones can improve the isolation of individual voices in group singing contexts, resulting in cleaner signals and more accurate F0 estimates [@Scherbaum16_LarynxMicrophones_IWFMA]. To further enhance interpretability, `pytch` includes several optional post-processing steps: a confidence threshold to discard estimates with low confidence score, a median filter to smooth the trajectories, and a gradient filter to suppress abrupt pitch slides. As a final step in the audio analysis, the harmonic intervals between the F0 trajectories are computed. Every audio feature is stored separately in a dedicated ring buffer. After processing, the pipeline sets a flag that notifies the GUI that new data is ready for visualization. # Graphical User Interface (GUI) -In this section, we provide a step-by-step explanation of the `pytch` GUI implemented in the file `gui.py`. Right after the program start, a startup menu opens in which the user is asked to specify the soundcard, input channels, sampling rate, and window size for processing. Furthermore, the user can choose to store the recorded audio and the F0 trajectories on disk. These configuration choices are required to initialize the audio processing module and the main GUI which is loaded when the user clicks "ok". A screenshot of the main GUI which opens after successful initialization is shown in Figure \autoref{fig:GUI}. +In this section, we provide a step-by-step explanation of the `pytch` GUI implemented in the file `gui.py`. Right after the program start, a startup menu opens in which the user is asked to specify the soundcard, input channels, sampling rate, and window size for processing. Furthermore, the user can choose to store the recorded audio and the F0 trajectories on disk. These configuration choices are required to initialize the audio processing module and the main GUI, which is loaded when the user clicks "ok". A screenshot of the main GUI which opens after successful initialization is shown in Figure \autoref{fig:GUI}. ![`pytch` GUI monitoring three singing voices.\label{fig:GUI}](../pictures/screenshot.png){ width=90% } The main GUI is organized into three horizontal sections. On the left, a control panel provides a start/stop button and allows users to adjust both the visual layout and algorithmic parameters. The central section displays "channel views"--one for each input channel--color-coded for clarity. Each view includes a microphone level meter, a real-time spectrum display with a vertical line marking the current F0 estimate, and a scrolling spectrogram with a 5 second time context. Channels are listed from top to bottom in the order they were selected during setup. Optionally, the bottommost view can display a product signal from all channels. -The right section, referred to as the "trajectory view," provides time-based visualizations of either the F0 trajectories ("pitches" tab) or the harmonic intervals between voices ("differential" tab) with a 10 second time context. Using the controls in the left-side menu, the user can select the F0 estimation algorithm and improve the real-time visualization by adjusting the confidence threshold, the median filter length for smoothing and the tolerance of the gradient filter. F0 and interval trajectories can be displayed with respect to a fixed reference frequency or a dynamic one derived from a selected channel, the lowest, or highest detected voice. Axis limits for this section can also be manually set. 
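The optional post-processing steps exposed through these controls can be summarized in a small stand-alone sketch: frames below a confidence threshold are discarded, a median filter smooths the trajectory, and a gradient filter removes frames with abrupt frame-to-frame jumps. The function below uses `numpy` and `scipy.signal.medfilt`; the default threshold, kernel size, and step limit are placeholder values rather than `pytch`'s actual defaults.

```python
import numpy as np
from scipy.signal import medfilt

def postprocess_f0(f0_cents, conf, conf_threshold=0.7,
                   median_len=5, max_step_cents=50.0):
    """Clean an F0 trajectory given per-frame confidence values.

    f0_cents: F0 trajectory in cents (NaN for unvoiced frames)
    conf: confidence in [0, 1] for each frame
    """
    f0 = np.asarray(f0_cents, dtype=float).copy()

    # 1) Confidence threshold: discard frames with low confidence
    f0[np.asarray(conf) < conf_threshold] = np.nan

    # 2) Median filter: smooth short outliers (NaNs filled temporarily)
    filled = np.where(np.isnan(f0), np.nanmedian(f0), f0)
    smoothed = medfilt(filled, kernel_size=median_len)
    smoothed[np.isnan(f0)] = np.nan

    # 3) Gradient filter: drop frames whose frame-to-frame jump is too
    #    large, which suppresses abrupt pitch slides
    step = np.abs(np.diff(smoothed, prepend=smoothed[0]))
    smoothed[step > max_step_cents] = np.nan
    return smoothed
```

Setting discarded frames to NaN rather than zero keeps gaps visible in a plotted trajectory instead of producing spurious jumps toward 0 Hz.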
+The right section, referred to as the "trajectory view," provides time-based visualizations of either the F0 trajectories ("pitches" tab) or the harmonic intervals between voices ("differential" tab) with a 10 second time context. Using the controls in the left-side menu, the user can select the F0 estimation algorithm and improve the real-time visualization by adjusting the confidence threshold, the median filter length for smoothing, and the tolerance of the gradient filter. F0 and interval trajectories can be displayed with respect to a fixed reference frequency or a dynamic one derived from a selected channel, the lowest, or highest detected voice. Axis limits for this section can also be manually set. # Acknowledgements We would like to thank all the singers who contributed to testing `pytch` during its development. From 93a270e76fcca1a725c5f6855c9e3a5066fcb6de Mon Sep 17 00:00:00 2001 From: sebastianrosenzweig Date: Sat, 31 May 2025 17:53:40 +0200 Subject: [PATCH 16/27] activate swipe --- pyproject.toml | 2 +- pytch/audio.py | 14 +++++++++++--- pytch/gui.py | 2 +- 3 files changed, 13 insertions(+), 5 deletions(-) diff --git a/pyproject.toml b/pyproject.toml index 8fe6ada..aa7a471 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -4,7 +4,7 @@ build-backend = "setuptools.build_meta" [project] name = "pytch" -version = "0.3.0" +version = "2.2.0" description = "A Real-Time Pitch Analysis Tool For Polyphonic Music" authors = [ {name = "Pytch Contributors"} diff --git a/pytch/audio.py b/pytch/audio.py index 5d2760c..6bec807 100644 --- a/pytch/audio.py +++ b/pytch/audio.py @@ -342,10 +342,12 @@ def compute_f0(self, audio, lvl): if lvl[0, c] < self.f0_lvl_threshold: continue + audio_tmp = np.concatenate( + (audio[:, c][::-1], audio[:, c], audio[:, c][::-1]) + ) if self.f0_algorithm == "YIN": - # TODO: replace with real-time version, add real-time SWIPE, relax min/max limits f0_tmp, _, conf_tmp = libf0.yin( - np.concatenate((audio[:, c][::-1], audio[:, c], audio[:, c][::-1])), + audio_tmp, Fs=self.fs, N=self.fft_len, H=self.fft_len, @@ -356,7 +358,13 @@ def compute_f0(self, audio, lvl): ) f0[:, c] = np.mean(f0_tmp) # take the center frame conf[:, c] = 1 - np.mean(conf_tmp) - + elif self.f0_algorithm == "SWIPE": + # TODO: replace with real-time version when available + f0_tmp, _, conf_tmp = libf0.swipe( + audio[:, c], Fs=self.fs, H=self.fft_len, F_min=80.0, F_max=640.0 + ) + f0[:, c] = np.mean(f0_tmp) + conf[:, c] = 1 - np.mean(conf_tmp) else: f0[:, c] = np.zeros(f0.shape[0]) conf[:, c] = np.zeros(f0.shape[0]) diff --git a/pytch/gui.py b/pytch/gui.py index 02143b6..531a5e3 100644 --- a/pytch/gui.py +++ b/pytch/gui.py @@ -189,7 +189,7 @@ def __init__(self, sounddevice_idx, channels, fs, fft_size, out_path): self.fs = fs self.fft_size = fft_size self.out_path = out_path - self.f0_algorithms = ["YIN"] + self.f0_algorithms = ["YIN", "SWIPE"] self.buf_len_sec = 30.0 self.spec_scale_types = ["log", "linear"] self.ref_freq_modes = ["fixed", "highest", "lowest"] From 0cdfe4694e0e155cf4a6110081c79d095a4fe47b Mon Sep 17 00:00:00 2001 From: sebastianrosenzweig Date: Sat, 31 May 2025 19:58:56 +0200 Subject: [PATCH 17/27] small addition --- paper/paper.md | 7 ++++++- 1 file changed, 6 insertions(+), 1 deletion(-) diff --git a/paper/paper.md b/paper/paper.md index 624c6b3..ab2fe8e 100644 --- a/paper/paper.md +++ b/paper/paper.md @@ -41,7 +41,7 @@ Our tool builds upon a late-breaking demo in [@KriegerowskiS_Pytch_2017], which [^1]: [^2]: -[^3]: +[^3]: # Statement of Need @@ -63,6 +63,11 @@ To fully 
leverage the capabilities of `pytch`, it is essential to record each si One way to reduce cross-talk is to increase the physical distance between singers or to record them in isolation. However, this is not always feasible, as singers need to hear one another to maintain accurate tuning. An effective workaround is the use of contact microphones, such as throat microphones, which capture vocal fold vibrations directly from the skin of the throat. This method offers a significant advantage: the recorded signals are largely immune to interference from other singers, resulting in much cleaner, more isolated recordings. Throat microphones have successfully been used to record vocal ensembles in several past studies [@Scherbaum16_LarynxMicrophones_IWFMA]. +In addition to live monitoring, `pytch` can also be used to analyze pre-recorded multitrack singing performances. By playing back individual vocal tracks in a digital audio workstation (DAW) and using virtual audio routing tools such as Loopback[^7] (macOS) or BlackHole[^8], these tracks can be streamed into `pytch` as if they were live microphone inputs. This setup allows users to benefit from `pytch`’s real-time visualization and analysis features during evaluation of rehearsals, performances, or field recordings. + +[^7]: +[^8]: + # Audio Processing The real-time audio processing pipeline implemented in the file `audio.py` is the heart of `pytch` and consists of two main stages: recording and analysis. The recording stage captures multichannel audio waveforms from the soundcard or an external audio interface using the `sounddevice` library. The library is based on PortAudio and supports a wide range of operating systems, audio devices, and sampling rates. The recorded audio is received in chunks via a recording callback and fed into a ring buffer shared with the analysis process. When the buffer is sufficiently filled with audio chunks, the analysis process reads the recorded audio to compute several audio features. From 309afbfc5fd2112d0e37af472fbe74800fd66375 Mon Sep 17 00:00:00 2001 From: sebastianrosenzweig Date: Tue, 3 Jun 2025 19:16:29 +0200 Subject: [PATCH 18/27] new link+comment --- paper/paper.md | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/paper/paper.md b/paper/paper.md index ab2fe8e..6287ee1 100644 --- a/paper/paper.md +++ b/paper/paper.md @@ -41,7 +41,7 @@ Our tool builds upon a late-breaking demo in [@KriegerowskiS_Pytch_2017], which [^1]: [^2]: -[^3]: +[^3]: # Statement of Need @@ -63,7 +63,7 @@ To fully leverage the capabilities of `pytch`, it is essential to record each si One way to reduce cross-talk is to increase the physical distance between singers or to record them in isolation. However, this is not always feasible, as singers need to hear one another to maintain accurate tuning. An effective workaround is the use of contact microphones, such as throat microphones, which capture vocal fold vibrations directly from the skin of the throat. This method offers a significant advantage: the recorded signals are largely immune to interference from other singers, resulting in much cleaner, more isolated recordings. Throat microphones have successfully been used to record vocal ensembles in several past studies [@Scherbaum16_LarynxMicrophones_IWFMA]. -In addition to live monitoring, `pytch` can also be used to analyze pre-recorded multitrack singing performances. 
By playing back individual vocal tracks in a digital audio workstation (DAW) and using virtual audio routing tools such as Loopback[^7] (macOS) or BlackHole[^8], these tracks can be streamed into `pytch` as if they were live microphone inputs. This setup allows users to benefit from `pytch`’s real-time visualization and analysis features during evaluation of rehearsals, performances, or field recordings. +In addition to live monitoring, `pytch` can also be used to analyze pre-recorded multitrack singing performances. By playing back individual vocal tracks in a digital audio workstation (DAW) and using virtual audio routing tools such as Loopback[^7] (macOS) or BlackHole[^8], these tracks can be streamed into `pytch` as if they were live microphone inputs. This setup, which was also used in the demo video[^3], allows users to benefit from `pytch`’s real-time visualization and analysis features during evaluation of rehearsals, performances, or field recordings. [^7]: [^8]: From 52e41a58cc61e16b7b829470abae7338cf8eed63 Mon Sep 17 00:00:00 2001 From: sebastianrosenzweig Date: Tue, 3 Jun 2025 19:27:02 +0200 Subject: [PATCH 19/27] acknowledgements --- paper/paper.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/paper/paper.md b/paper/paper.md index 6287ee1..6801ca6 100644 --- a/paper/paper.md +++ b/paper/paper.md @@ -85,6 +85,6 @@ The main GUI is organized into three horizontal sections. On the left, a control The right section, referred to as the "trajectory view," provides time-based visualizations of either the F0 trajectories ("pitches" tab) or the harmonic intervals between voices ("differential" tab) with a 10 second time context. Using the controls in the left-side menu, the user can select the F0 estimation algorithm and improve the real-time visualization by adjusting the confidence threshold, the median filter length for smoothing, and the tolerance of the gradient filter. F0 and interval trajectories can be displayed with respect to a fixed reference frequency or a dynamic one derived from a selected channel, the lowest, or highest detected voice. Axis limits for this section can also be manually set. # Acknowledgements -We would like to thank all the singers who contributed to testing `pytch` during its development. +We would like to thank Peter Meier and Sebastian Strahl for their help with integrating the real-time F0 algorithms, and all the singers who contributed to testing `pytch` during its development. # References From 11873606e4df0f4a6b790b78646da4a30d561ea1 Mon Sep 17 00:00:00 2001 From: sebastianrosenzweig Date: Tue, 3 Jun 2025 20:27:16 +0200 Subject: [PATCH 20/27] add menu picture --- paper/paper.md | 8 ++++++-- pictures/menu.png | Bin 0 -> 90067 bytes 2 files changed, 6 insertions(+), 2 deletions(-) create mode 100644 pictures/menu.png diff --git a/paper/paper.md b/paper/paper.md index 6801ca6..83ec2d0 100644 --- a/paper/paper.md +++ b/paper/paper.md @@ -76,9 +76,13 @@ For each channel, the analysis stage computes the audio level in dBFS, a time--f # Graphical User Interface (GUI) -In this section, we provide a step-by-step explanation of the `pytch` GUI implemented in the file `gui.py`. Right after the program start, a startup menu opens in which the user is asked to specify the soundcard, input channels, sampling rate, and window size for processing. Furthermore, the user can choose to store the recorded audio and the F0 trajectories on disk. 
These configuration choices are required to initialize the audio processing module and the main GUI, which is loaded when the user clicks "ok". A screenshot of the main GUI which opens after successful initialization is shown in Figure \autoref{fig:GUI}. +In this section, we provide a step-by-step explanation of the `pytch` GUI implemented in the file `gui.py`. Right after the program start, a startup menu opens in which the user is asked to specify the soundcard, input channels, sampling rate, and window size for processing (see Figure \autoref{fig:menu}). Furthermore, the user can choose to store the recorded audio and the F0 trajectories on disk. -![`pytch` GUI monitoring three singing voices.\label{fig:GUI}](../pictures/screenshot.png){ width=90% } +![`pytch` startup menu.\label{fig:menu}](../pictures/menu.png){ width=50% } + +These configuration choices are required to initialize the audio processing module and the main GUI, which is loaded when the user clicks "ok". A screenshot of the main GUI which opens after successful initialization is shown in Figure \autoref{fig:GUI}. + +![`pytch` GUI monitoring three singing voices.\label{fig:GUI}](../pictures/screenshot.png){ width=100% } The main GUI is organized into three horizontal sections. On the left, a control panel provides a start/stop button and allows users to adjust both the visual layout and algorithmic parameters. The central section displays "channel views"--one for each input channel--color-coded for clarity. Each view includes a microphone level meter, a real-time spectrum display with a vertical line marking the current F0 estimate, and a scrolling spectrogram with a 5 second time context. Channels are listed from top to bottom in the order they were selected during setup. Optionally, the bottommost view can display a product signal from all channels. 
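For orientation, the sketch below shows how the startup choices (sound card, input channels, sampling rate, and window size) map onto a `sounddevice` input stream whose callback writes incoming chunks into a shared ring buffer, in the spirit of the recording stage described in the Audio Processing section. All names and values are illustrative and not `pytch`'s actual implementation; a virtual device such as BlackHole or Loopback would simply appear as an additional entry in the device list.

```python
import numpy as np
import sounddevice as sd

print(sd.query_devices())   # list available sound cards and virtual devices

# Illustrative startup choices (normally taken from the startup menu)
device = None               # None = system default input device
channels = 2                # number of singers / input channels
fs = 44100                  # sampling rate in Hz
blocksize = 2048            # window size processed per callback
# at 44.1 kHz and a block size of 2048 samples, a new chunk arrives roughly
# every 46 ms, which bounds the time available for the analysis step

buf_len = fs * 30           # ~30 s ring buffer per channel
ring = np.zeros((buf_len, channels), dtype=np.float32)
write_pos = 0

def callback(indata, frames, time, status):
    """Recording callback: copy each incoming chunk into the ring buffer."""
    global write_pos
    if status:
        print(status)
    end = write_pos + frames
    if end <= buf_len:
        ring[write_pos:end] = indata
    else:                   # wrap around at the end of the buffer
        first = buf_len - write_pos
        ring[write_pos:] = indata[:first]
        ring[:frames - first] = indata[first:]
    write_pos = end % buf_len

with sd.InputStream(device=device, channels=channels, samplerate=fs,
                    blocksize=blocksize, callback=callback):
    sd.sleep(2000)          # record for two seconds
```

Keeping the callback limited to copying data, and leaving all feature computation to a separate analysis step that reads from the ring buffer, is the usual way to avoid dropped chunks in the audio thread.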
diff --git a/pictures/menu.png b/pictures/menu.png new file mode 100644 index 0000000000000000000000000000000000000000..f67c37d157f18252a5ef42fe67b7c80b4bfe829c GIT binary patch literal 90067 zcmZ@f17Kar+BazIG-+(xX>8lJZ5xdm8;xx@Xlyr5V>LD!w>+0HSaMolM2Sww{;;WvF6h%BN zfIp8w7IoxJ3lcs!zz#*+!~=j-7|WiWPOh>whjcWg?~kNL=@0_LPU(;iqjzu>{&7?A z2^>(znN2?m;Lpdttne0qn9c3A4oDEDNajC=td+ApH74wHBK#b{YcJiZn4NhOBs;Lq zG6TVS{}d@uF;e(-2*j|3C6w>^m1gku^9oLX7sh-|so992VIoIwE6;EiPEp?>l3#3r zh_GM!l+mk;+CgnR^WhC*R8q#Veb&%7TF zwg?IlEATbRCbs(pDj*$_#PKke&zuC=I860dG7gyNinIMDQhxO3%)S;4MemhdGoDBz zSL@+B(F{dO1*gI|@OQi?1T};YBmTZvYw*lV`6g{833vgS=V{~Zw;-K`FC8|)TA)Ng0?Jk# z>cmm-%eNeOEVBF}4|P{$(^2QnVje#_1}3#(_BmWWeGBlqy4v`ze6TGt-B$8m<&(Py zIv4L}LQ5Fn=LFW_vM1y%=yjBLR>* zR3hY9#I?Qk1!~ay*jLVA6~`?BSr=!(!LyHKlJ)u9Eo~$S$Bo83ISD2V0Z?~P%nLrE ztzI&Jj_+MX$FH|ffXB>DezLY~1-QteJVbXCuyBw*BtxVrboMSEdwOmhyk~uaKF-0%(zrnRD^~(0T$ctzk;UDEku-eqD zt#oJQZaevvbRcE*hIkr&tO5JRaZKV0V%2j90?QT6s!#h(ffVnPgWQAZN|!mK&xNs2 zX0FrjV}gpR;;HiX?7_9jTnc%5ai>c0fNcZTlPBv zfKD9eyPE?rBYgyGecp*K_$nt~0;J$TaU{t2E@2EGvQK+V3}|EESNzBnAfw$_@(64J zl{qlS5RU>p=Ah)=8urlIAnaW_@4Yy?U?<_%0=_~8#v)<$@LwaN_My~@eL`m23IZqE z6o-osE+E1T16Lqmj=L9!u0x>|`%Ex5#QUAW5v9eyfzT~BRGj=0>ps}9gtIIND_3=2 zJTb~~l7R&w)88~lX%f^Kr1dSW5cQOfIj$#4d+-|n^3*$XLEbg^APip z=MbBfm6e$lvC*8hc1gRDsj=O1Zw{}_M|nZDNolXFxP*E(rR+?eFV|P>89qw1KJR`i z-om4yu)%VPatUk+56P#e&IFP_2@O7Ohi!< znOZ_MhgQAdRn0E#I38`tJVm8=rFJE$CRZb!InM%_g_Z@RC2Zq6bGtd-!spZg#xYFo z5P!z(+9-#Rsa&Di+WE}`DI2{wPSl`Uu!%BJS;v?UmyKA!`^^!g_wd+ zf@j0}#JG=D#k^wGtnYqt{@m-wEDV1FX9Q=4VasaGc+AMe2E!JQRmse0GHWu%a>G2q zq-*0n);tqzuw_KVreWi_@*Odm?qlzWK~k-3eX9(-jF2>1k(RWU3}1#uMt=rc`U&F~ zJ6TqTK$5*uUx43Y}=5Fr82x(=F%U*xndE!r#~sLlJX}ii;{o zH}B+}b~y~Wl{mQX4>`lR(KuJQd2>t~8ea~f5r2((+Wmf*xZN;*a7uBaked%9#v1K; zV!cOWUuECHo6g2*e*I|*K3}ip+m|qHw$>F_ljdj_o&BZbhq>`pnO)VR`2*X{tiz7A zXLPMl+{0I&RU= zT=A3T%D$RryXqd%_LU2LMMDyg7=AmJ$ z;67Gp)@i0`bD-g5b3QJsy(OX{CPDrYM-cz6h3e8b(PcKVgm|iNzhpmRznC%K&~)I_ zN0UC;h*iu-x(Pm8jg?jR40jQ4Cr}A4C4Fk-i%T;=yq&H_BlGiBGe!(vq^` zYgsGIy!KD&7r2yFlp>VuS@7B}68&TL@I`=1~b2(NKERniA zE!cK+Tg_m46naY?ZqJL>{`H6t{qLkcNlHp}&>+*a++CGqkNTeKq<1v9q%GG)jfRhs zj@nh7>nJoTTA6f@43-a6m1&J@>a;zaq)S-QCAKEoYD(#pt1s)K>(19q7_MHr=#4Os z+9}_u9c$P%`(8ZUVh3Ya*X)4z^qQiYg^EUNQZuXH(TolB^2Bq` zOQGkN?wj}%JvVXJ2DN_W%qrLFsx^ydCk=Sb{E~x}q`IS#Q~y=?MubMK8;0A&<2PPt zUa#L$TF-W0YM#d-zd>3eYT{}8g*lWoBa~+7F6d-ytgpq+PP)Fows*=`+s-A97IS4u zV^hJI<7qv{`0*{!sC^JN`u0<7);?#N!==Mj!S0vhfy`E4&IjkCv>N8UuMn9$F40b> zcOGX~GVi$QaXN-N)cuA}6m>pYjppj<`0>mh&TG0YK0EfJ#ZX7t7%fHX{Al;Q)a+`k zU+`F5y2abnz3>@-EJTFESAMa5e9Urf zy2(cb!~4t~;p2Q;yK(W6{E}Rc6P>$|ht|jRvf`oT>Z)iWXM)-<@-=_^!*%~&RUTjC zi}vgE?oM~-(~IQ+-oomIBj(oKnV|(ZUWM>Sn=LK|Ac=qxOn~L&iy?jvXxCovP_hti zX8(qB&q~V<|Aeh{BH z0YC!pP=KG{9MJ#t29wSK`%fD*9rz9)pd=(A0sOCItt@{oC4?ib5+px;u=l> z06OW<4~T>!$vH4KTDDMDcUG5?<}|Xip))YHGc=)dx3T{@4uIR86WFvdaW)`ux3RW$ z;&kUB`lAOYu>JEfJrThlU7W3Wh}31|354t%O$gZN80Z*?cwq<#2)G@MO*s{XMgIZ^ z-tiEbJ3HHR($l-SxzV{X)7d$i(KB*zaL_X_(K9j80(;OpdDuD|xYOD?5&t>Jf5s6u zaWZnWuy?kwvnBXBu7RPQi!%=q(a(wg{`qsAChivhoypeeFSLLN(*Jxz&q&8W|G&VT zElmF(u%B=K1p5Q8Kd0mVIT)vklZm5{osEr&tuya`DbD@JOuxMR?>hekl(%pwe?b03fm6}M$N002KgLRdiA9ppF@(jU8jZZLb~2bY?4kQazihGE%( z{)|>hE*W8B3?M*W5GH`1f{-K;Nn8+S_1Y3e5E@B*zrEF;u^HQ*enF1Y=Zvp>>f}Vi zt;}uu;MIEdbYfy`;>EQvv*~Ocd%dZ{NmrPJ2#u5!1|Vp$tW%PP|YUx=tg(Y4`$(qo6_Ww5vqfkVo}h59A(u+ zDF%#0abd*#t*iUKvs)=EtMWxr&S?U1M631y|Ib3VM)>o!!kmY-JJy&=@~;G|$@La% z+XR_|SH|9}!~@4=q)4MxqR$JfaT4G6%lL zq+Xh1A0mYG(B_>X6$n#b-xDjU=+G5*__LIgLD-b%7AmAGD@3rc55IVw5v{-6G#T%c zVPl`bk0^-KDwgTycDSO<=ZEC-3tS6Rypi=!sp+C8$dCgi;s4{Bv;oK2E4Ag)?#8Fs 
z_AOEpP>*7}<3FC>=YKH!9)`?hdTp-JWSi-Iv#71nYFvAbKe?`+MMXs=gXunaKezQZ zyaWXLK}Hy!Up`ecWA0cND>m6wJxy?WWtN7twO#{_(p))!Uw9x2lg#X8{q~x__s0ey zs~2m(Lfn|cUFfeg@&~^R^sims&yMw+Wo>E@cae$m96iyek-=|!4PYsGTa;4)yfw1&qlR}eC)lwS1_yI-xQk3;Sa zwIswo=EpQy_hY85M`aw@fPczDMpiNPTQ$TPdQR1}JD zQ3ybOxhR}L?H7a4EH1wfGr67a)fpe4u)^4osHmur5rBt~2|Po8aJEjALnGBGq0X{Y z)~u05EBtY^Fd{LCM;VG!Q!U!Bq{c%f^}$!J=J~hf(}CJASSfs*+0BSaF%SwJzT8<) zh4Ko+It`-EdVzfiLZMMnYcrjzk|!uIEt!{3!1?^j!m%|8COrvpccSo%lmaC-rwN&V z3{IpiH(rVlN?t202G^p)Y_*kTXiL+HaG0%9s7}Ha=U= zzNmH6ViHm_nX}6n6gn30kBk3|#9tq2*`~9yVs2Byg|iX`bj{!{ejUzOai>JYY-rVP ziH>t{dSgw(-%R8`V*MZ1LPP0;YDt@O>GSg~)))jq3*pV%e}9Rc`0}=^KpM;G>CO({ zR4}ABB}VXf$bR7*6_-2-RB}ljolMN-;2igtu+~kL{ky_U*Kd*#@L1~7-0oGsv4RAN z7(lHgB2uWVrXuO1Q(0s+8CnxEt<~ZE;kXKQmT2w$hu=j$A`Y7~mYLufYS1yQSV64nd&l-d3B_=0JBt6jG94xCn926JH zWlxk1FLr!Y`x>u7&6nt5wL7%s0&8pnb{7s;fk8t27!~L6Og5AZ))Y4mVH;P^R%7^! zs8EoBLIs9~dC+pM#-B~*%YMXh*k5zpf)+DzIV}uX%pM9!c_qfyeXhQyq*j8j)D9%I z?5Bb*Yc1$4c0N`vDO@zSIZDqcFl{PTtl#5RWV9%^Z9z}%ce{^3t5Y1bo4YQJix`p2 z>}pcZPy_XuxT9vX0`K;W>P9HEJm2W9o5~TGuGZ_cwOId@gg;vTF*{))xr0OzH?|ID zM@^IHC8w7j2S(!KK!lxYr5anXNt1!s=xOh5Qg{&AUXf4Sit?!Df9-wF0!2B!lvP#5cRW8x3JE26H(IAD+&J3UJlv)*e-F@T8uniI zF2!0=qe|4{U*1E!8^3U94d9oTYN7E6jWn zc005Ga#!aImHckt=obe;OAx@Rilos!K<| zH57B`#v%4~nsMq4)<#jd^g`Y>KLUjxiNpH`%h9=&wC>4f3>7MuX980(6=(UXx@`F;t|Y z3K6zPz6oNQ<0$Bvj79?^c^_ckxR_|^Yj_$@tTO43AsH!H!gJX$jn4)NNKEtX8qH4u z_C3p_4@mlo#{8$SM~Umt-p+~;{>_g+8seD%=lmSc><`Komqs)S>%4QN3tN-{j8azF z>4d#(m}Gg|)&#Y(?FzBZS~EHJ_}y z3yS=b$u>qIRMOd=7i=RFoM)_B6JS%^?&~N25QAlSm|nj{Y=-|D|Eo14kpT#~j8IcW z>Tb}hl}m(2&kZ|2p@4ETNGjt_%eJ}h;(mJ#Bo2+FqXdhQR_&zuP-XvD; zXusSgcfuOD;f)r?h_(vdIg1faZ(9%pp@ed5qD#i9(`}QuW@zeBBhYv;9)nwvSctSP zBOu&Ms;d%E8tVkYotz12y2WA))4z$1r|#BJm%YavA=B#v4=t%KOHoVdh^^eM#rxG# zVS&~#+`H@}D1-TFltQpi4!J}(67fJpIqjLVw51fZwGKER%b4kv3%;(pUSt*?{K!x? 
ztF)n?KMyZ?#DmGJg0Fs7!`hpepB)2NI909Ewh#=?*iIKJxh#qZ=P1LQ;piyCRTn5b zjR2s`Ux?*un80Mnr>d5qgtRiF1ta+Bfe9r%hu6@zOANB*ExjfjWVTgkeU&q6bGd4Q5wEaNoXj9@=D(1(v}u zYLZyp`F*N{8!oFS-e;Pde7aJYA?aC}-z7R7KqM#R2DLvO)8R%oPm0rMu~uwKrd{#n z$noLsvm5@&oS4_ksn0;!_Y%^Qp5{vdy{;9R)N?i^jZWXpG_&CnUIo^(Xckp-b8{II7B(0A29c)Uan{)eIBdDp-jffjyX4_{PF+Q$ zwq#gZS=lsN(7&t9sNRmArDgJ@$#-sDIhO9faHc#%_|EtaeF4NMEwIk~88qRe-*4Vq zJh?v4`4d*;lC*2O_f#~8mMYys>;{&HtQ*H+wCh4;Uap0WWcco+bMe`kF~2W06yK>h ziwyDf2YI)U>6Mj&sS~7K95E-Nt2C+~?z`fqU5d&wR#wr&D6k&Uu149upV%Iip{e6` zcTE%M6Zp!pRm1S(-sJz_f{dOwUpAU@elP!sI~4tvLdTb9Z|=t;tn}38r1USIaDc8P z;Q2tFg63tQXdseS8iBaj%#QOYTGfE_bn5iWuwm;BU15oj8op%m9Kt6>irM5X9Ma5A zQLfn_j(PeD^GbCL5op#tQu(Y;&h`B95sj{C$?q3GiCU;Oz-LSM&l)uEYhE|kavCjH z5x1xs-RDu)9=F=eS89x9#4Z2ktwBSBvlIn>Jqtq4V=*W)c1wiAq3J+}r>vTuPifL| zDVKlso#Tr<#qdf=&Wu3~E$NW*M2f=yG6s zKF1i7Wh4`%vsRZpxQnW)O5uF!`xdcl72k22C+YWB6uqvC?Gf>-7Sr7bcb`Zn@(MYC zUR?C&mS&RQHlBFE!(3R!iJc8RsRoI|=Q5I&3nxW8f?!NC8xD#Q=S#k=%W&dX zt=~Kr@||52>rFDF%DBmvu_2a}L@>__4;IV=cM3y<1d=NcO z+z(Ci!wKaahDyCofu2f(?NIZT%j=Ds2BSgtrHBOEGz|8(lK2l#6h}(s+V64LTyBmQ z=x26^(Xnla0k{ayI9*=^}`7)N-6l3mpM+L!jy1&0P^i}zXMZ&%-OHJtWV z4J`dotY20ZSWT0Rl4amBKPS-WwAIN#$b)o9B%0%#q%fCJ_%NYiwJho+quV>o5d#b+ zGdLC8is}~qX^oo3+#a)sTWmF{FZ0bsmjYarC@b4VcoA z8LyK)DqFP5md-$<=5wDU&?uIaO$fmd%U~f3w1}y;Da;(roLXYZo;!-P4Rk!5UnuCt zo3GTOFJacIdklLnEc>h|2qhcP_4dxL0bfU<+Y(a1E7mrdBImWyTD)4o#Ha9>*Ki#{ zB=tYx-d_qHO9C>d^@R06-7mxJIjWsd<;<__6awl&vw>O%V(QF4>$h zW2R4m6<$J_Ns2kH4edN(d^TxxI_-i}ORPAA*Sv@jd|TI61d%rn33{(x zhr7q2D26BwqA1hmuCVWD5;uE8f4Wc7E9O=l?lbu&<9T+{eca{^<|us*gwF1H$l`9i zL;f-K{JhODRl0XkI83MJY2t3&q42-@L;Mlo8}SQRQy!`E6BMwNsz~%+>p=(!AcMYg z$#gn-j7_YETFM7q(^Rti>qg!QPSV#;nKnwYP0@C-J~&jy5SNFUrLSKdOO9&VxvxP zB_(NmPH85&4u&`?s-50z`mrn284a_9z$UQ{{-A;=yu zy7ka_6p6j|&0*VkfAW}G>T|7F^lP*-IQ7nfM~sBd97HM57aFc>kTK7C5i%DN&*;Hq z>?S%~n3FWI`QYtWUSCS1k*fWRJIbF9jFxgSm)urp6M!9$+g=3Wgm0|l7MOW0*QaxV^ffQSky-cEu*OuhhSZ>)v;rDo|e zd~-BIMdu=u)Kc0As%bqH3OET&&^F%nN%xXzsqdA*9B@m?z+**g=Wm{{dD|`eJoz4m zG&NR+Nd4le5(Kb7zZWu|U9f%?ivBp}{(XX_{Ge5RW`#rXc35nh%mnfUU1!fU*~RdauHZud$eEagni#@+YwuGYk;$&D6HJ_(AKP$gPfoqfe1`1|qiAQ2P( z$xvWBB8mS!PO`l^tuzWcG&lw@t8E9K#s2F={9>SN@y&edn)43yO25I2=Y&Cb^#nUe zzZ@Pq3Pd{mW?6u|@)rUGT;!0yv9huR-b&^)G$xp`CneB7TN(mN3KRA2$*4oo1MI<5TQBZS4i3*YZ+L@OjE>v$jc`}zRT z@4*=$6UwBw5KHy&{*^Nn&*1@yG!op0)J$D++C=}2$Z@FtQdxRnACKh8sH8=F>Cdl! 
zqKp}Gk4qorD#=8uw439*%Syrb_Yk?6nCCwiL&p&?g_|x-P|Gh3@QGqe>Lr9W^=sJ`UeK3;{0qw5Ko*e_FBZP`i(pPhr<^)0RE0+s_vxMdH@(xZ7$HIVe==3~j* z#1rm4-%#luo@}?}N*~hB%O#`z+*)2bGJuQ&F@>+9VC>)C%pctsWzl(EH&7cUmZ?mXfd(tg$^*rWs<+?y0OgcdfBCpEJtuofSQ&%A}J}91BO0qLZ6WMkl8i zG_n-*%`fxB{p&ZuetPerL%OZhL2>Op0q5hob9{Ji{icZKM^iB1pO~<;P`;q9@BaaB CIo)Oe literal 0 HcmV?d00001 From 24e71630c347d78f4135621ac179d55fefe4d8ee Mon Sep 17 00:00:00 2001 From: sebastianrosenzweig Date: Tue, 3 Jun 2025 20:37:21 +0200 Subject: [PATCH 21/27] readme demo --- README.md | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/README.md b/README.md index 335a62f..9275f7e 100644 --- a/README.md +++ b/README.md @@ -5,6 +5,10 @@ Pytch - A Real-Time Pitch Analysis Tool For Polyphonic Music ![screenshot](pictures/screenshot.png) +## Demo + +If you want to see `pytch` in action, watch our [demo video](https://youtu.be/LPt83Wqf2e4). + ## Download and Installation Clone the project From 914ea5a39fa3acfcee828b79a49606fb3515aae4 Mon Sep 17 00:00:00 2001 From: sebastianrosenzweig Date: Tue, 3 Jun 2025 20:40:18 +0200 Subject: [PATCH 22/27] readme wiki --- README.md | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/README.md b/README.md index 9275f7e..a9d56fe 100644 --- a/README.md +++ b/README.md @@ -5,10 +5,13 @@ Pytch - A Real-Time Pitch Analysis Tool For Polyphonic Music ![screenshot](pictures/screenshot.png) -## Demo +## Demo & Wiki If you want to see `pytch` in action, watch our [demo video](https://youtu.be/LPt83Wqf2e4). +Please have a look at our [wiki](https://github.com/pytchtracking/pytch/wiki) for an explanation of the GUI. + + ## Download and Installation Clone the project From c2bec85c5fa944fa8b682aec83acff62e35e52ef Mon Sep 17 00:00:00 2001 From: sebastianrosenzweig Date: Fri, 6 Jun 2025 10:46:17 +0200 Subject: [PATCH 23/27] literature --- paper/paper.bib | 20 ++++++++++++++++++++ paper/paper.md | 2 +- 2 files changed, 21 insertions(+), 1 deletion(-) diff --git a/paper/paper.bib b/paper/paper.bib index 063b798..e7a2628 100644 --- a/paper/paper.bib +++ b/paper/paper.bib @@ -20,6 +20,15 @@ @article{MeierCM24_RealTimePLP_TISMIR url-demo = {https://audiolabs-erlangen.de/resources/MIR/2024-TISMIR-RealTimePLP} } +@inproceedings{StefaniT22_RealTimeMIR_DAFX, + title = {On the challenges of embedded real-time music information retrieval}, + author = {Domenico Stefani and Luca Turchet}, + booktitle = {Proceedings of the International Conference on Digital Audio Effects (DAFx)}, + volume = {3}, + pages = {177--184}, + year = {2022} +} + @phdthesis{Cuesta22_Multipitch_PhD, author = {Helena Cuesta}, year = {2022}, @@ -143,6 +152,17 @@ @article{CamachoH08_SawtoothWaveform_JASA pages = {1638--1652}, } +@article{Goto04_RealTimeF0_SC, + title = {A real-time music-scene-description system: Predominant-F0 estimation for detecting melody and bass lines in real-world audio signals}, + author = {Masataka Goto}, + journal = {Speech Communication}, + volume = {43}, + number = {4}, + pages = {311--329}, + year = {2004}, + publisher = {Elsevier} +} + @article{CheveigneK02_YIN_JASA, author = {Alain de Cheveign{\'e} and Hideki Kawahara}, title = {{YIN}, a fundamental frequency estimator for speech and music.}, diff --git a/paper/paper.md b/paper/paper.md index 83ec2d0..64bb25e 100644 --- a/paper/paper.md +++ b/paper/paper.md @@ -49,7 +49,7 @@ Software that assesses the pitch of a singing voice in real time is best known f To address these challenges, we developed `pytch`. 
Our tool is currently the only software that enables singers and conductors to monitor and train harmonic interval singing in real time — a skill that is essential in many vocal traditions. This includes not only polyphonic genres such as traditional Georgian vocal music [@ScherbaumMRM19_MultimediaRecordings_FMA] or Barbershop singing [@HagermanS80_Barbershop_CITESEER], where precise tuning between voices is stylistically central, but also the practice of non-tempered tuning systems found in various oral traditions. In more detail, the vocal spectra can help singers fine-tune the expression of formant frequencies, while melodic and harmonic issues become visible through F0 trajectories and harmonic intervals. Unlike many existing tools, `pytch` does not require a musical score, making it well-suited for rehearsals, ethnomusicological research and pedagogical contexts focused on intonation and harmonic listening. -In addition to its practical applications, `pytch` also provides a flexible platform for music information retrieval (MIR) research on real-time audio processing. Working with real-time data introduces challenges such as a limited audio context for analysis and strict timing constraints to ensure low-latency processing. Researchers can use `pytch` to develop, test, and compare algorithms for tasks like F0 estimation and signal enhancement [@MeierCM24_RealTimePLP_TISMIR]. +In addition to its practical applications, `pytch` also provides a flexible platform for music information retrieval (MIR) research on real-time audio processing. Working with real-time data introduces challenges such as a limited audio context for analysis and strict timing constraints to ensure low-latency processing. Researchers can use `pytch` to develop, test, and compare algorithms for F0 estimation and other music information retrieval tasks [@StefaniT22_RealTimeMIR_DAFX;@Goto04_RealTimeF0_SC;@MeierCM24_RealTimePLP_TISMIR]. [^4]: [^5]: From e061abcc6badef73a399aa88bbcc27c2cb8bfa5a Mon Sep 17 00:00:00 2001 From: sebastianrosenzweig Date: Fri, 6 Jun 2025 11:17:25 +0200 Subject: [PATCH 24/27] acknowledgemnts --- paper/paper.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/paper/paper.md b/paper/paper.md index 64bb25e..ac7a486 100644 --- a/paper/paper.md +++ b/paper/paper.md @@ -89,6 +89,6 @@ The main GUI is organized into three horizontal sections. On the left, a control The right section, referred to as the "trajectory view," provides time-based visualizations of either the F0 trajectories ("pitches" tab) or the harmonic intervals between voices ("differential" tab) with a 10 second time context. Using the controls in the left-side menu, the user can select the F0 estimation algorithm and improve the real-time visualization by adjusting the confidence threshold, the median filter length for smoothing, and the tolerance of the gradient filter. F0 and interval trajectories can be displayed with respect to a fixed reference frequency or a dynamic one derived from a selected channel, the lowest, or highest detected voice. Axis limits for this section can also be manually set. # Acknowledgements -We would like to thank Peter Meier and Sebastian Strahl for their help with integrating the real-time F0 algorithms, and all the singers who contributed to testing `pytch` during its development. 
+We would like to thank Lukas Dietz for his help with the implementation, Peter Meier and Sebastian Strahl for their support with integrating the real-time F0 algorithms, and all the singers who contributed to testing `pytch` during its development.
 
 # References

From bde3b8f3fa123eb407d79e187e04d307733eb183 Mon Sep 17 00:00:00 2001
From: sebastianrosenzweig
Date: Fri, 27 Jun 2025 11:48:24 +0200
Subject: [PATCH 25/27] improved gui refresh

---
 README.md           |   4 +-
 paper/paper.bib     |  10 --
 paper/paper.md      |   6 +-
 pytch/__init__.py   |  10 --
 pytch/audio.py      | 328 +++++++++++++++++++------------------------
 pytch/gui.py        | 129 ++++++++---------
 pytch/gui_utils.py  |  14 --
 test/test_buffer.py |   6 +-
 8 files changed, 203 insertions(+), 304 deletions(-)

diff --git a/README.md b/README.md
index a9d56fe..0093343 100644
--- a/README.md
+++ b/README.md
@@ -37,9 +37,9 @@ pytch
 ```
 hit return and sing!
 
-## Contribution
+## Contributing
 
-Every contribution is welcome. To ensure consistent style we use [black](https://github.com/psf/black).
+Every contribution is welcome. Please feel free to open an issue or a pull request. To ensure consistent style we use [black](https://github.com/psf/black).
 
 You can add automated style checks at commit time using [pre-commit](https://pre-commit.com/)
 
 ```bash
diff --git a/paper/paper.bib b/paper/paper.bib
index e7a2628..e619fc3 100644
--- a/paper/paper.bib
+++ b/paper/paper.bib
@@ -1,13 +1,3 @@
-@inproceedings{MeierSM25_RealTimeF0_ISMIR,
-author = {Peter Meier and Sebastian Strahl and Simon Schw{\"a}r and Meinard M{\"u}ller},
-title = {libf0-realtime: TODO},
-booktitle = {Submitted to the International Society for Music Information Retrieval Conference ({ISMIR})},
-address = {},
-year = {2025},
-url-pdf = {},
-url-code = {}
-}
-
 @article{MeierCM24_RealTimePLP_TISMIR,
 author = {Peter Meier and Ching-Yu Chiu and Meinard M{\"u}ller},
 title = {{A} Real-Time Beat Tracking System with Zero Latency and Enhanced Controllability},
diff --git a/paper/paper.md b/paper/paper.md
index ac7a486..e6aa802 100644
--- a/paper/paper.md
+++ b/paper/paper.md
@@ -36,7 +36,7 @@ bibliography: paper.bib
 
 # Summary
 
 Polyphonic singing is one of the most widespread forms of music-making. During a performance, singers must constantly adjust their pitch to stay in tune with one another — a complex skill that requires extensive practice. Research has shown that pitch monitoring tools can assist singers in fine-tuning their intonation during a performance [@BerglinPD22_VisualFeedback_JPM]. Specifically, real-time visualizations of the fundamental frequency (F0), which represents the pitch of the singing voice, help singers assess their pitch relative to a fixed reference or other voices.
 
-To support the monitoring of polyphonic singing performances, we developed `pytch`, an interactive Python tool with a graphical user interface (GUI) designed to record, process, and visualize multiple voices in real time. The GUI displays vocal spectra and estimated F0 trajectories for all singers, as well as the harmonic intervals between them. Additionally, users can adjust visual and algorithmic parameters interactively to accommodate different input devices, microphone signals, singing styles, and use cases. Written in Python, `pytch` utilizes the `libf0-realtime` library [@MeierSM25_RealTimeF0_ISMIR] for real-time F0 estimation and `pyqtgraph`[^1] for efficient visualizations of the analysis results.
+To support the monitoring of polyphonic singing performances, we developed `pytch`, an interactive Python tool with a graphical user interface (GUI) designed to record, process, and visualize multiple voices in real time. The GUI displays vocal spectra and estimated F0 trajectories for all singers, as well as the harmonic intervals between them. Additionally, users can adjust visual and algorithmic parameters interactively to accommodate different input devices, microphone signals, singing styles, and use cases. Written in Python, `pytch` utilizes the `libf0` library [@RosenzweigSM22_libf0_ISMIR-LBD] for real-time F0 estimation and `pyqtgraph`[^1] for efficient visualizations of the analysis results. Our tool builds upon a late-breaking demo in [@KriegerowskiS_Pytch_2017], which we refer to as version 1. Since then, the tool has been significantly extended with a new real-time graphics engine, a modular audio processing backend that facilitates the integration of additional algorithms, and improved support for a wider range of platforms and recording hardware, which we refer to as version 2. Over its seven years of development, `pytch` has been tested and refined through use in several rehearsals, workshops, and field studies — including Sardinian quartet singing (see demo video[^2]) and traditional Georgian singing (see demo video[^3]). [^1]: @@ -72,7 +72,7 @@ In addition to live monitoring, `pytch` can also be used to analyze pre-recorded # Audio Processing The real-time audio processing pipeline implemented in the file `audio.py` is the heart of `pytch` and consists of two main stages: recording and analysis. The recording stage captures multichannel audio waveforms from the soundcard or an external audio interface using the `sounddevice` library. The library is based on PortAudio and supports a wide range of operating systems, audio devices, and sampling rates. The recorded audio is received in chunks via a recording callback and fed into a ring buffer shared with the analysis process. When the buffer is sufficiently filled with audio chunks, the analysis process reads the recorded audio to compute several audio features. -For each channel, the analysis stage computes the audio level in dBFS, a time--frequency representation of the audio signal via the Short-Time Fourier Transform (see [@Mueller21_FMP_SPRINGER] for fundamentals of music processing), and an estimate of the F0 along with a confidence value, using the `libf0-realtime` library [@MeierSM25_RealTimeF0_ISMIR]. The library includes several real-time implementations of well-known F0 estimation algorithms, such as YIN [@CheveigneK02_YIN_JASA] and SWIPE [@CamachoH08_SawtoothWaveform_JASA]. YIN is a time-domain algorithm that computes the F0 based on a tweaked auto-correlation function. It is computationally efficient and well-suited for low-latency applications, but it tends to suffer from estimation errors, particularly confusions with higher harmonics such as the octave. In contrast, SWIPE is a frequency-domain algorithm that estimates the F0 by matching different spectral representations of the audio with sawtooth-like kernels. While more computationally demanding, SWIPE typically yields more reliable estimates, in particular for vocal input signals. `pytch` allows users to choose between these algorithms depending on their specific needs and system capabilities. The obtained F0 estimates, which are natively computed in the unit Hz, are converted to the unit cents using a user-specified reference frequency. 
Depending on the audio quality and vocal characteristics, F0 estimates may exhibit artifacts such as discontinuities or pitch slides, which can make the resulting trajectories difficult to interpret [@RosenzweigSM19_StableF0_ISMIR]. Previous research has shown that using throat microphones can improve the isolation of individual voices in group singing contexts, resulting in cleaner signals and more accurate F0 estimates [@Scherbaum16_LarynxMicrophones_IWFMA]. To further enhance interpretability, `pytch` includes several optional post-processing steps: a confidence threshold to discard estimates with low confidence score, a median filter to smooth the trajectories, and a gradient filter to suppress abrupt pitch slides. As a final step in the audio analysis, the harmonic intervals between the F0 trajectories are computed. Every audio feature is stored separately in a dedicated ring buffer. After processing, the pipeline sets a flag that notifies the GUI that new data is ready for visualization. +For each channel, the analysis stage computes the audio level in dBFS, a time--frequency representation of the audio signal via the Short-Time Fourier Transform (see [@Mueller21_FMP_SPRINGER] for fundamentals of music processing), and an estimate of the F0 along with a confidence value, using the `libf0` library [@RosenzweigSM22_libf0_ISMIR-LBD]. The library includes several implementations of well-known F0 estimation algorithms. We make use of YIN [@CheveigneK02_YIN_JASA], which is a time-domain algorithm that computes the F0 based on a tweaked auto-correlation function. It is computationally efficient and well-suited for low-latency applications, but it tends to suffer from estimation errors, particularly confusions with higher harmonics such as the octave. The obtained F0 estimates, which are natively computed in the unit Hz, are converted to the unit cents using a user-specified reference frequency. Depending on the audio quality and vocal characteristics, F0 estimates may exhibit artifacts such as discontinuities or pitch slides, which can make the resulting trajectories difficult to interpret [@RosenzweigSM19_StableF0_ISMIR]. Previous research has shown that using throat microphones can improve the isolation of individual voices in group singing contexts, resulting in cleaner signals and more accurate F0 estimates [@Scherbaum16_LarynxMicrophones_IWFMA]. To further enhance interpretability, `pytch` includes several optional post-processing steps: a confidence threshold to discard estimates with low confidence score, a median filter to smooth the trajectories, and a gradient filter to suppress abrupt pitch slides. As a final step in the audio analysis, the harmonic intervals between the F0 trajectories are computed. Every audio feature is stored separately in a dedicated ring buffer. After processing, the pipeline sets a flag that notifies the GUI that new data is ready for visualization. # Graphical User Interface (GUI) @@ -89,6 +89,6 @@ The main GUI is organized into three horizontal sections. On the left, a control The right section, referred to as the "trajectory view," provides time-based visualizations of either the F0 trajectories ("pitches" tab) or the harmonic intervals between voices ("differential" tab) with a 10 second time context. Using the controls in the left-side menu, the user can select the F0 estimation algorithm and improve the real-time visualization by adjusting the confidence threshold, the median filter length for smoothing, and the tolerance of the gradient filter. 
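To make these post-processing controls concrete, the following minimal sketch chains them together per voice, mirroring the `f2cent` and `gradient_filter` helpers in `pytch/audio.py`; it is not part of the patch series, and the threshold, filter length, tolerance, and reference values are only illustrative defaults.

```python
import numpy as np
from scipy.ndimage import median_filter

eps = np.finfo(float).eps


def f2cent(f, f_ref=440.0):
    # convert frequency in Hz to cents relative to a reference frequency
    return 1200.0 * np.log2(np.abs(f) / f_ref + eps)


def postprocess_f0(f0, conf, conf_threshold=0.5, smoothing_len=3, gradient_tol=600, ref_freq=220.0):
    f0 = np.asarray(f0, dtype=float).copy()
    conf = np.asarray(conf, dtype=float)
    # confidence threshold: keep only frames with a valid F0 and a high confidence
    voiced = np.where((conf >= conf_threshold) & (f0 > 0))[0]
    if smoothing_len > 0 and voiced.size > 0:
        # median smoothing of the voiced frames
        f0[voiced] = median_filter(f0[voiced], size=smoothing_len)
    # gradient filter: keep frames whose frame-to-frame change in cents stays below the tolerance
    grad_ok = np.where(np.abs(np.diff(f2cent(f0[voiced]))) < gradient_tol)[0]
    keep = voiced[grad_ok]
    # convert the surviving frames from Hz to cents relative to the reference frequency
    cents = np.full(f0.shape, np.nan)
    cents[keep] = f2cent(f0[keep], ref_freq)
    return cents
```

For a steady 220 Hz voice, for example, the surviving frames map to roughly 0 cents with `ref_freq=220.0`, while an isolated octave glitch is either smoothed away by the median filter or discarded by the gradient filter.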
F0 and interval trajectories can be displayed with respect to a fixed reference frequency or a dynamic one derived from a selected channel, the lowest, or highest detected voice. Axis limits for this section can also be manually set. # Acknowledgements -We would like to thank Lukas Dietz for his help with the implementation, Peter Meier and Sebastian Strahl for their support with integrating the real-time F0 algorithms, and all the singers who contributed to testing `pytch` during its development. +We would like to thank Lukas Dietz for his help with the implementation, Peter Meier and Sebastian Strahl for the collaboration on real-time implementations, and all the singers who contributed to testing `pytch` during its development. # References diff --git a/pytch/__init__.py b/pytch/__init__.py index fe1fb6b..ca0e42e 100644 --- a/pytch/__init__.py +++ b/pytch/__init__.py @@ -1,13 +1,3 @@ import logging -try: # Python 2.7+ - from logging import NullHandler -except ImportError: - - class NullHandler(logging.Handler): - def emit(self, record): - pass - - logging.basicConfig(level=logging.INFO) -logging.getLogger(__name__).addHandler(NullHandler()) diff --git a/pytch/audio.py b/pytch/audio.py index 6bec807..65a8d3c 100644 --- a/pytch/audio.py +++ b/pytch/audio.py @@ -13,9 +13,6 @@ from datetime import datetime import csv -_audio_lock = threading.Lock() # lock for raw audio buffer -_feat_lock = threading.Lock() # lock for feature buffers -_gui_lock = threading.Lock() # lock for communication with GUI logger = logging.getLogger("pytch.audio") eps = np.finfo(float).eps @@ -81,48 +78,53 @@ def __init__(self, size, dtype): self.buffer = np.zeros(size, dtype=dtype) self.write_head = 0 self.read_head = 0 + self.lock = threading.Lock() def write(self, data): """Writes data to buffer""" if data.shape[0] > self.size[0]: logger.warning("Buffer overflow!") - write_idcs = np.mod(self.write_head + np.arange(data.shape[0]), self.size[0]) - self.buffer[write_idcs, ...] = data - self.write_head = np.mod( - write_idcs[-1] + 1, self.size[0] - ) # set write head to the next bin to write to + with self.lock: + write_idcs = np.mod( + self.write_head + np.arange(data.shape[0]), self.size[0] + ) + self.buffer[write_idcs, ...] = data + self.write_head = np.mod( + write_idcs[-1] + 1, self.size[0] + ) # set write head to the next bin to write to def read_latest(self, n_frames): - """Reads n_frames from buffer, starting from latest data""" if self.size[0] < n_frames: - Exception("Cannot read more data than buffer length!") + Exception("cannot read more data than buffer length!") - read_idcs = np.mod( - self.size[0] + self.write_head - np.arange(n_frames) - 1, self.size[0] - )[::-1] - return self.buffer[read_idcs, ...] + with self.lock: + read_idcs = np.mod( + self.size[0] + self.write_head - np.arange(n_frames) - 1, self.size[0] + )[::-1] + return self.buffer[read_idcs, ...] 
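As a quick orientation to the buffer API above, here is a minimal usage sketch (not part of the patch): one thread writes incoming audio blocks, another reads back the most recent window; the sampling rate, block size, and channel count are placeholder values.

```python
import numpy as np
from pytch.audio import RingBuffer

fs, n_channels, buf_len_sec = 8000, 2, 30.0
buf = RingBuffer(size=(int(buf_len_sec * fs), n_channels), dtype=np.float32)

# producer side: e.g. the sounddevice recording callback writing one audio block
block = np.zeros((512, n_channels), dtype=np.float32)  # stand-in for real samples
buf.write(block)

# consumer side: fetch the most recent second of data, regardless of how many
# blocks arrived in between; the per-buffer lock added here keeps this thread-safe
latest = buf.read_latest(int(1.0 * fs))
print(latest.shape)  # -> (8000, 2)
```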
- def read_next(self, n_frames, hop_frames=None): + def read(self, n_frames, hop_frames=None): """Reads n_frames from buffer, starting from latest read""" + with self.lock: + if ( + np.mod(self.size[0] + self.write_head - self.read_head, self.size[0]) + < n_frames + ): + # return empty array if not enough data available + return np.array([]) - if ( - np.mod(self.size[0] + self.write_head - self.read_head, self.size[0]) - < n_frames - ): - return np.array([]) - - read_idcs = np.mod( - self.size[0] + self.read_head + np.arange(n_frames), self.size[0] - )[::-1] + read_idcs = np.mod( + self.size[0] + self.read_head + np.arange(n_frames), self.size[0] + )[::-1] - if hop_frames is None: - hop_frames = n_frames + if hop_frames is None: + hop_frames = n_frames - self.read_head = np.mod( - self.read_head + hop_frames, self.size[0] - ) # advance read head + self.read_head = np.mod( + self.read_head + hop_frames, self.size[0] + ) # advance read head - return self.buffer[read_idcs, ...] + return self.buffer[read_idcs, ...] def flush(self): self.buffer = np.zeros_like(self.buffer) @@ -141,7 +143,6 @@ def __init__( channels=None, device_no=None, f0_algorithm="YIN", - gui=None, out_path="", ): self.fs = fs @@ -154,7 +155,6 @@ def __init__( self.device_no = device_no self.f0_algorithm = f0_algorithm self.out_path = out_path - self.gui = gui self.f0_lvl_threshold = -70 # minimum level in dB to compute f0 estimates self.frame_rate = self.fs / self.hop_len self.stream = None @@ -191,6 +191,10 @@ def __init__( dtype=np.float32, ) + self.worker = threading.Thread( + target=self.worker_thread + ) # thread for computations + # initialize output files if out_path != "": start_t = datetime.now().strftime("%Y-%m-%d_%H-%M-%S") @@ -209,31 +213,6 @@ def __init__( + [f"Confidence Channel {ch}" for ch in channels] ) - # initialise output buffers that are read by GUI - if gui is not None: - self.new_gui_data_available = False - self.proc_lvl = gui.lvl_cvals[0] - self.proc_spec = np.zeros( - (self.raw_fft_buf.buffer.shape[1], len(self.channels) + 1) - ) - self.proc_stft = np.zeros( - ( - int(np.round(gui.disp_t_stft * self.frame_rate)), - len(self.fft_freqs), - len(self.channels) + 1, - ) - ) - self.proc_inst_f0 = np.full((1, len(self.channels) + 1), np.nan) - self.proc_f0 = np.zeros( - (int(np.round(gui.disp_t_f0 * self.frame_rate)), len(self.channels)) - ) - self.proc_diff = np.zeros( - ( - self.proc_f0.shape[0], - (len(self.channels) * (len(self.channels) - 1)) // 2, - ) - ) - def start_stream(self): """Start recording and processing""" if self.is_running: @@ -257,9 +236,6 @@ def start_stream(self): ) self.stream.start() self.is_running = True - self.worker = threading.Thread( - target=self.worker_thread - ) # thread for computations self.worker.start() def stop_stream(self): @@ -281,10 +257,7 @@ def close_stream(self): def worker_thread(self): """The thread that does all the audio processing""" while self.is_running: - with _audio_lock: - audio = self.audio_buf.read_next( - self.fft_len, self.hop_len - ) # get audio + audio = self.audio_buf.read(self.fft_len, self.hop_len) # get audio if audio.size == 0: sleep(0.001) @@ -294,16 +267,10 @@ def worker_thread(self): fft = self.compute_fft(audio) # compute fft f0, conf = self.compute_f0(audio, lvl) # compute f0 & confidence - with _feat_lock: - self.raw_lvl_buf.write(lvl) - self.raw_fft_buf.write(fft) - self.raw_f0_buf.write(f0) - self.raw_conf_buf.write(conf) - - # GUI pre-processing for faster updates - if self.gui is not None: - self.gui_preprocessing() - 
self.new_gui_data_available = True + self.raw_lvl_buf.write(lvl) + self.raw_fft_buf.write(fft) + self.raw_f0_buf.write(f0) + self.raw_conf_buf.write(conf) # write trajectories to disk if configured if self.out_path != "": @@ -316,8 +283,7 @@ def recording_callback(self, data, frames, time, status): data[:, self.channels].astype(np.float32, order="C") / 32768.0 ) # convert int16 to float32 - with _audio_lock: - self.audio_buf.write(audio_conv) + self.audio_buf.write(audio_conv) if self.out_path != "": self.audio_out_file.write(audio_conv) @@ -358,101 +324,12 @@ def compute_f0(self, audio, lvl): ) f0[:, c] = np.mean(f0_tmp) # take the center frame conf[:, c] = 1 - np.mean(conf_tmp) - elif self.f0_algorithm == "SWIPE": - # TODO: replace with real-time version when available - f0_tmp, _, conf_tmp = libf0.swipe( - audio[:, c], Fs=self.fs, H=self.fft_len, F_min=80.0, F_max=640.0 - ) - f0[:, c] = np.mean(f0_tmp) - conf[:, c] = 1 - np.mean(conf_tmp) else: f0[:, c] = np.zeros(f0.shape[0]) conf[:, c] = np.zeros(f0.shape[0]) return f0, conf - def gui_preprocessing(self): - """Prepares computed features for display in GUI which speeds up everything""" - # get raw data - lvl, spec, stft, f0, conf = self.read_latest_frames( - self.gui.disp_t_lvl, - self.gui.disp_t_spec, - self.gui.disp_t_stft, - self.gui.disp_t_f0, - self.gui.disp_t_conf, - ) - - # compute max level and clip - proc_lvl = np.clip( - np.max(lvl, axis=0), - a_min=self.gui.lvl_cvals[0], - a_max=self.gui.lvl_cvals[-1], - ) - - # preprocess spectrum - n_spec_frames = spec.shape[0] - spec = np.mean(spec, axis=0) - proc_spec = np.zeros((spec.shape[0], spec.shape[1] + 1)) - proc_spec[:, :-1] = spec - proc_spec[:, -1] = np.prod(spec, axis=1) - if self.gui.cur_spec_scale_type == "log": - proc_spec = np.log(1 + 1 * proc_spec) - max_values = np.abs(proc_spec).max(axis=0) - proc_spec /= np.where(max_values != 0, max_values, 1) - - # preprocess stft - proc_stft = np.zeros((stft.shape[0], stft.shape[1], stft.shape[2] + 1)) - proc_stft[:, :, :-1] = stft - proc_stft[:, :, -1] = np.prod(stft, axis=2) - if self.gui.cur_spec_scale_type == "log": - proc_stft = np.log(1 + 1 * proc_stft) - max_values = np.max(np.abs(proc_stft), axis=(0, 1), keepdims=True) - proc_stft /= np.where(max_values != 0, max_values, 1) - - # preprocess f0 - median_len = self.gui.cur_smoothing_len - if median_len > 0: - idcs = np.argwhere(f0 > 0) - f0[idcs] = median_filter(f0[idcs], size=median_len, axes=(0,)) - conf[idcs] = median_filter(conf[idcs], size=median_len, axes=(0,)) - - inst_f0 = np.mean(f0[-n_spec_frames:, :], axis=0) - inst_conf = np.mean(conf[-n_spec_frames:, :], axis=0) - inst_f0[inst_conf < self.gui.cur_conf_threshold] = np.nan - - # compute reference frequency - cur_ref_freq_mode = self.gui.cur_ref_freq_mode - ref_freq = self.gui.cur_ref_freq - if cur_ref_freq_mode == "fixed": - cur_ref_freq = ref_freq - elif cur_ref_freq_mode == "highest": - cur_ref_freq = np.max(np.mean(f0, axis=0)) - elif cur_ref_freq_mode == "lowest": - cur_ref_freq = np.min(np.mean(f0, axis=0)) - else: - cur_ref_freq = f0[-1, int(cur_ref_freq_mode[-2:]) - 1] - - # threshold trajectories and compute intervals - nan_val = 99999 - proc_f0, proc_diff = self.f0_diff_computations( - f0, - conf, - self.gui.cur_conf_threshold, - self.gui.cur_derivative_tol, - cur_ref_freq, - nan_val, - ) - proc_f0[proc_f0 == nan_val] = np.nan - proc_diff[proc_diff == nan_val] = np.nan - - with _gui_lock: - self.proc_lvl = proc_lvl - self.proc_spec[:] = proc_spec - self.proc_stft[:] = proc_stft - self.proc_f0[:] = proc_f0 
- self.proc_inst_f0[:, :-1] = inst_f0 - self.proc_diff[:] = proc_diff - @staticmethod @njit def f0_diff_computations( @@ -489,29 +366,106 @@ def f0_diff_computations( return proc_f0, proc_diff - def read_latest_frames(self, t_lvl, t_spec, t_stft, t_f0, t_conf): - """Reads latest t seconds from buffers""" + def get_gui_data( + self, + disp_t_lvl, + disp_t_spec, + disp_t_stft, + disp_t_f0, + disp_t_conf, + lvl_cvals, + cur_spec_scale_type, + cur_smoothing_len, + cur_conf_threshold, + cur_ref_freq_mode, + cur_ref_freq, + cur_derivative_tol, + ): + """Reads and prepares data for GUI""" + lvl = self.raw_lvl_buf.read_latest(int(np.round(disp_t_lvl * self.frame_rate))) + spec_raw = self.raw_fft_buf.read_latest( + int(np.round(disp_t_stft * self.frame_rate)) + ) + f0 = self.raw_f0_buf.read_latest(int(np.round(disp_t_f0 * self.frame_rate))) + conf = self.raw_conf_buf.read_latest( + int(np.round(disp_t_conf * self.frame_rate)) + ) + + # compute max level and clip + if len(lvl) > 0: + lvl = np.clip( + np.max(lvl, axis=0), + a_min=lvl_cvals[0], + a_max=lvl_cvals[-1], + ) + + # preprocess spectrum + if len(spec_raw) > 0: + n_spec_frames = int(np.round(spec_raw.shape[0] * disp_t_spec / disp_t_stft)) + spec = np.mean(spec_raw[-n_spec_frames:, :, :], axis=0) + spec = np.concatenate((spec, np.prod(spec, axis=1).reshape(-1, 1)), axis=-1) + if cur_spec_scale_type == "log": + spec = np.log(1 + 1 * spec) + max_values = np.abs(spec).max(axis=0) + spec /= np.where(max_values != 0, max_values, 1) + else: + spec = np.array([]) - with _feat_lock: - lvl = self.raw_lvl_buf.read_latest(int(np.round(t_lvl * self.frame_rate))) - spec = self.raw_fft_buf.read_latest(int(np.round(t_spec * self.frame_rate))) - stft = self.raw_fft_buf.read_latest(int(np.round(t_stft * self.frame_rate))) - f0 = self.raw_f0_buf.read_latest(int(np.round(t_f0 * self.frame_rate))) - conf = self.raw_conf_buf.read_latest( - int(np.round(t_conf * self.frame_rate)) + # preprocess stft + if len(spec_raw) > 0: + stft = np.zeros( + (spec_raw.shape[0], spec_raw.shape[1], spec_raw.shape[2] + 1) ) + stft[:, :, :-1] = spec_raw + stft[:, :, -1] = np.prod(spec_raw, axis=2) + if cur_spec_scale_type == "log": + stft = np.log(1 + 1 * stft) + max_values = np.max(np.abs(stft), axis=(0, 1), keepdims=True) + stft /= np.where(max_values != 0, max_values, 1) + else: + stft = np.array([]) - return lvl, spec, stft, f0, conf - - def get_latest_gui_data(self): - """Reads pre-processed data for GUI""" - with _gui_lock: - self.new_gui_data_available = False - return ( - self.proc_lvl, - self.proc_spec, - self.proc_inst_f0, - self.proc_stft, - self.proc_f0, - self.proc_diff, + # preprocess f0 + if len(f0) > 0: + median_len = cur_smoothing_len + if median_len > 0: + idcs = np.argwhere(f0 > 0) + f0[idcs] = median_filter(f0[idcs], size=median_len, axes=(0,)) + conf[idcs] = median_filter(conf[idcs], size=median_len, axes=(0,)) + + n_spec_frames = int(np.round(spec_raw.shape[0] * disp_t_spec / disp_t_stft)) + inst_f0 = np.mean(f0[-n_spec_frames:, :], axis=0) + inst_f0 = np.concatenate((inst_f0, [0])) + inst_conf = np.mean(conf[-n_spec_frames:, :], axis=0) + inst_conf = np.concatenate((inst_conf, [0])) + inst_f0[inst_conf < cur_conf_threshold] = np.nan + + # compute reference frequency + cur_ref_freq_mode = cur_ref_freq_mode + ref_freq = cur_ref_freq + if cur_ref_freq_mode == "fixed": + cur_ref_freq = ref_freq + elif cur_ref_freq_mode == "highest": + cur_ref_freq = np.max(np.mean(f0, axis=0)) + elif cur_ref_freq_mode == "lowest": + cur_ref_freq = np.min(np.mean(f0, axis=0)) + 
else: + cur_ref_freq = f0[-1, int(cur_ref_freq_mode[-2:]) - 1] + + # threshold trajectories and compute intervals + nan_val = 99999 + f0, diff = self.f0_diff_computations( + f0, + conf, + cur_conf_threshold, + cur_derivative_tol, + cur_ref_freq, + nan_val, ) + f0[f0 == nan_val] = np.nan + diff[diff == nan_val] = np.nan + else: + inst_f0 = np.array([]) + diff = np.array([]) + + return lvl, spec, inst_f0, stft, f0, diff diff --git a/pytch/gui.py b/pytch/gui.py index 531a5e3..7ad1d7c 100644 --- a/pytch/gui.py +++ b/pytch/gui.py @@ -3,9 +3,7 @@ """GUI Functions""" import logging -import threading import sys -import time import numpy as np import importlib.metadata @@ -19,7 +17,6 @@ import pyqtgraph as pg logger = logging.getLogger("pytch.gui") -_refresh_lock = threading.Lock() # lock for GUI updates def start_gui(): @@ -105,7 +102,7 @@ def update_channel_info(self, menu_index): sampling_rate_options = get_fs_options(sounddevice_index) self.channel_selector = ChannelSelector( - n_channels=nmax_channels, channels_enabled=[0], menu_buttons=self.buttons + n_channels=nmax_channels, menu_buttons=self.buttons ) self.channel_options.setWidget(self.channel_selector) @@ -144,7 +141,7 @@ def get_input_settings(self): class ChannelSelector(qw.QWidget): """Widget for the channel buttons on the right side of the input menu""" - def __init__(self, n_channels, channels_enabled, menu_buttons): + def __init__(self, n_channels, menu_buttons): super().__init__() self.setLayout(qw.QVBoxLayout()) @@ -189,7 +186,7 @@ def __init__(self, sounddevice_idx, channels, fs, fft_size, out_path): self.fs = fs self.fft_size = fft_size self.out_path = out_path - self.f0_algorithms = ["YIN", "SWIPE"] + self.f0_algorithms = ["YIN"] self.buf_len_sec = 30.0 self.spec_scale_types = ["log", "linear"] self.ref_freq_modes = ["fixed", "highest", "lowest"] @@ -215,7 +212,7 @@ def __init__(self, sounddevice_idx, channels, fs, fft_size, out_path): self.cur_conf_threshold = 0.5 self.cur_derivative_tol = 600 self.cur_smoothing_len = 3 - self.last_refresh = time.time() + self.gui_refresh_ms = int(np.round(1000 / 60)) # 60 fps # status variables self.is_running = False @@ -240,7 +237,6 @@ def __init__(self, sounddevice_idx, channels, fs, fft_size, out_path): channels=self.channels, device_no=self.sounddevice_idx, f0_algorithm=self.f0_algorithms[0], - gui=self, out_path=out_path, ) @@ -271,9 +267,9 @@ def __init__(self, sounddevice_idx, channels, fs, fft_size, out_path): layout.addWidget(splitter) # refresh timer - self.refresh_timer = GUIRefreshTimer() - self.refresh_timer.refresh_signal.connect(self.refresh_gui) - self.refresh_timer.start() + self.refresh_timer = qc.QTimer() + self.refresh_timer.timeout.connect(self.refresh_gui) + self.refresh_timer.start(self.gui_refresh_ms) self.play_pause() # start recording and plotting @@ -281,31 +277,38 @@ def play_pause(self): """Starts or stops the GUI""" if self.is_running: self.audio_processor.stop_stream() - self.refresh_timer.stop_emitting() + self.refresh_timer.stop() self.is_running = False self.menu.play_pause_button.setText("Play") else: self.audio_processor.start_stream() - self.refresh_timer.start_emitting() + self.refresh_timer.start(self.gui_refresh_ms) self.is_running = True self.menu.play_pause_button.setText("Pause") @qc.pyqtSlot() def refresh_gui(self): """GUI refresh function, needs to be as fast as possible""" - with _refresh_lock: # only update when last update has finished - if self.audio_processor.new_gui_data_available: - # get preprocessed audio data from audio processor - lvl, 
spec, inst_f0, stft, f0, diff = ( - self.audio_processor.get_latest_gui_data() - ) - # update widgets - self.channel_views.on_draw(lvl, spec, inst_f0, stft) - self.trajectory_views.on_draw(f0, diff) + # get preprocessed audio data from audio processor + lvl, spec, inst_f0, stft, f0, diff = self.audio_processor.get_gui_data( + disp_t_lvl=self.disp_t_lvl, + disp_t_spec=self.disp_t_spec, + disp_t_stft=self.disp_t_stft, + disp_t_f0=self.disp_t_f0, + disp_t_conf=self.disp_t_conf, + lvl_cvals=self.lvl_cvals, + cur_spec_scale_type=self.cur_spec_scale_type, + cur_smoothing_len=self.cur_smoothing_len, + cur_conf_threshold=self.cur_conf_threshold, + cur_ref_freq_mode=self.cur_ref_freq_mode, + cur_ref_freq=self.cur_ref_freq, + cur_derivative_tol=self.cur_derivative_tol, + ) - # logger.info(f"Last refresh finished {time.time() - self.last_refresh}s ago") - self.last_refresh = time.time() + # update widgets + self.channel_views.on_draw(lvl, spec, inst_f0, stft) + self.trajectory_views.on_draw(f0, diff) def menu_toggle_button(self): """The button for toggeling the menu""" @@ -334,39 +337,12 @@ def toggle_menu(self): def closeEvent(self, a0): """Clean up when GUI is closed""" - self.refresh_timer.terminate() + self.refresh_timer.stop() self.audio_processor.stop_stream() self.audio_processor.close_stream() sys.exit() -class GUIRefreshTimer(qc.QThread): - """Timer for GUI refreshes""" - - refresh_signal = qc.pyqtSignal() - - def __init__(self): - super().__init__() - self.emit_signal = True - - def run(self): - while 1: - time.sleep(1 / 24) # ideally update with 24 fps - if self.emit_signal: - with ( - _refresh_lock - ): # make sure last refresh is done before sending next one - self.refresh_signal.emit() - - def stop_emitting(self): - """when user presses pause""" - self.emit_signal = False - - def start_emitting(self): - """when user presses play""" - self.emit_signal = True - - class ProcessingMenu(qw.QFrame): """The processing menu on the left side of the main window""" @@ -748,22 +724,16 @@ def __init__( @qc.pyqtSlot(object, object, object, object) def on_draw(self, lvl, spec, inst_f0, stft): """Refreshes all widgets as fast as possible""" - # prepare data - if self.is_product: - lvl_update = lvl[-1] - stft_update = stft[:, :, -1] - spec_update = spec[:, -1] - inst_f0_update = inst_f0[:, -1] - else: - lvl_update = lvl[self.ch_id] - stft_update = stft[:, :, self.ch_id] - spec_update = spec[:, self.ch_id] - inst_f0_update = inst_f0[:, self.ch_id] + idx = -1 if self.is_product else self.ch_id - # update widgets - self.level_refresh_signal.emit(lvl_update) - self.spectrum_refresh_signal.emit(spec_update, inst_f0_update) - self.spectrogram_refresh_signal.emit(stft_update) + if len(lvl) > 0 and not self.is_product: + self.level_refresh_signal.emit(lvl[idx]) + + if len(spec) > 0: + self.spectrum_refresh_signal.emit(spec[:, idx], inst_f0[idx]) + + if len(stft) > 0: + self.spectrogram_refresh_signal.emit(stft[:, :, idx]) def show_spectrum_widget(self, show): self.spectrum_widget.setVisible(show) @@ -894,7 +864,8 @@ def on_disp_freq_lims_changed(self, disp_freq_lims): @qc.pyqtSlot(object, float) def on_draw(self, data_plot, inst_f0=None): """Updates the spectrum and the fundamental frequency line.""" - self._line.setData(self.f_axis, data_plot) # Update the spectrum line + self._line.setData(x=self.f_axis, y=data_plot) # Update the spectrum line + if inst_f0 is not None: self._inst_f0_line.setPos(inst_f0) # Update the fundamental frequency line @@ -1013,8 +984,10 @@ def __init__(self, main_window: MainWindow, 
*args, **kwargs): @qc.pyqtSlot(object, object) def on_draw(self, f0, diff): - self.pitch_view.on_draw(f0) - if len(self.main_window.channels) > 1: + if len(f0) > 0: + self.pitch_view.on_draw(f0) + + if len(self.main_window.channels) > 1 and len(diff) > 0: self.pitch_diff_view.on_draw(diff) def on_disp_pitch_lims_changed(self, disp_pitch_lims): @@ -1097,8 +1070,9 @@ def __init__(self, main_window: MainWindow, *args, **kwargs): @qc.pyqtSlot(object) def on_draw(self, f0): """Updates the F0 trajectories for each channel.""" - for i in range(f0.shape[1]): - self._lines[i].setData(self.t_axis, f0[:, i]) # Update the line data + if len(f0) > 0: + for i in range(f0.shape[1]): + self._lines[i].setData(self.t_axis, f0[:, i]) # Update the line data class DifferentialPitchWidget(pg.GraphicsLayoutWidget): @@ -1172,6 +1146,11 @@ def __init__(self, main_window: MainWindow, *args, **kwargs): @qc.pyqtSlot(object) def on_draw(self, diff): """Updates the pitch differences for each channel pair.""" - for i in range(diff.shape[1]): - self._lines[i][0].setData(self.t_axis, diff[:, i]) # Update the solid line - self._lines[i][1].setData(self.t_axis, diff[:, i]) # Update the dashed line + if len(diff) > 0: + for i in range(diff.shape[1]): + self._lines[i][0].setData( + self.t_axis, diff[:, i] + ) # Update the solid line + self._lines[i][1].setData( + self.t_axis, diff[:, i] + ) # Update the dashed line diff --git a/pytch/gui_utils.py b/pytch/gui_utils.py index 87d9db0..1355b9b 100644 --- a/pytch/gui_utils.py +++ b/pytch/gui_utils.py @@ -56,20 +56,6 @@ def __init__(self): ) -class QVLine(qw.QFrame): - """A vertical separation line""" - - def __init__(self): - super().__init__() - self.setMinimumHeight(1) - self.setFixedWidth(20) - self.setFrameShape(qw.QFrame.Shape.VLine) - self.setFrameShadow(qw.QFrame.Shadow.Sunken) - self.setSizePolicy( - qw.QSizePolicy.Policy.Preferred, qw.QSizePolicy.Policy.Minimum - ) - - def disable_interactivity(plot_item): plot_item.setMouseEnabled(x=False, y=False) # Disable mouse panning & zooming plot_item.hideButtons() # Disable corner auto-scale button diff --git a/test/test_buffer.py b/test/test_buffer.py index 499f35e..f2280d1 100644 --- a/test/test_buffer.py +++ b/test/test_buffer.py @@ -12,6 +12,6 @@ def test_ring_buffer(): ) buf.write(test_data) assert np.all(buf.read_latest(20) == test_data) - assert np.all(buf.read_next(10, 10) == test_data[:10, :, :][::-1, :, :]) - assert np.all(buf.read_next(10, 10) == test_data[10:20, :, :][::-1, :, :]) - assert buf.read_next(10, 10).size == 0 + assert np.all(buf.read(10, 10) == test_data[:10, :, :][::-1, :, :]) + assert np.all(buf.read(10, 10) == test_data[10:20, :, :][::-1, :, :]) + assert buf.read(10, 10).size == 0 From 04473bd91e2d016f3583f652c00d799170372f0b Mon Sep 17 00:00:00 2001 From: sebastianrosenzweig Date: Fri, 27 Jun 2025 16:49:16 +0200 Subject: [PATCH 26/27] smaller fixes and first docstrings --- pyproject.toml | 3 + pytch/audio.py | 247 ++++++++++++++++++++++++++++++++++---------- pytch/cli.py | 2 - pytch/gui.py | 24 +++-- pytch/gui_utils.py | 14 ++- test/test_buffer.py | 6 +- 6 files changed, 228 insertions(+), 68 deletions(-) diff --git a/pyproject.toml b/pyproject.toml index aa7a471..27c528b 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -26,5 +26,8 @@ include-package-data = false [tool.setuptools.packages.find] include = ["pytch*"] +[tool.poetry.extras] +test = ["pytest"] + [project.scripts] pytch = "pytch.cli:main" diff --git a/pytch/audio.py b/pytch/audio.py index 65a8d3c..e020764 100644 --- a/pytch/audio.py +++ 
b/pytch/audio.py @@ -19,7 +19,12 @@ def get_input_devices(): - """Returns a list of devices.""" + """Returns a list of input devices. + + Returns: + List of available input devices. + + """ input_devices = [] for device_id, device in enumerate(sounddevice.query_devices()): if device["max_input_channels"] > 0: @@ -28,7 +33,15 @@ def get_input_devices(): def get_fs_options(device_idx): - """Returns a dictionary of supported sampling rates for all devices.""" + """Returns a dictionary of supported sampling rates for all devices. + + Args: + device_idx: Device index. + + Returns: + List of supported sampling rates. + + """ candidates = [8000.0, 11025.0, 16000.0, 22050.0, 32000.0, 37800.0, 44100.0, 48000.0] supported_fs = [] for c in candidates: @@ -39,7 +52,16 @@ def get_fs_options(device_idx): def check_fs(device_index, fs): - """Validates chosen sampling rate.""" + """Validates chosen sampling rate. + + Args: + device_index: Device index. + fs: Sampling rate. + + Returns: + True if sampling rate is supported, else False. + + """ valid = True try: sounddevice.check_input_settings( @@ -58,14 +80,32 @@ def check_fs(device_index, fs): @njit -def f2cent(f, standard_frequency=440.0): - """Convert from Hz to Cents""" - return 1200.0 * np.log2(np.abs(f) / standard_frequency + eps) +def f2cent(f, f_ref=440.0): + """Convert frequency from Hz to Cents. + + Args: + f: Frequency. + f_ref: Reference frequency. + + Returns: + Frequency in Cents. + + """ + return 1200.0 * np.log2(np.abs(f) / f_ref + eps) @njit def gradient_filter(y, max_gradient): - """Get index where the abs gradient of x, y is < max_gradient.""" + """Gradient filter. + + Args: + y: Signal. + max_gradient: Upper boundary for absolute gradient. + + Returns: + Indices where the absolute gradient of y is < max_gradient. + + """ return np.where(np.abs(np.diff(f2cent(y))) < max_gradient)[0] @@ -73,7 +113,12 @@ class RingBuffer: """Generic ring buffer for n-dimensional data""" def __init__(self, size, dtype): - """Initialize buffer, size should be of format (n_frames, ..., n_channels)""" + """Initialize buffer. + + Args: + size: buffer size (n_frames, ..., n_channels) + dtype: buffer dtype + """ self.size = size self.buffer = np.zeros(size, dtype=dtype) self.write_head = 0 @@ -81,7 +126,12 @@ def __init__(self, size, dtype): self.lock = threading.Lock() def write(self, data): - """Writes data to buffer""" + """Writes data to buffer. + + Args: + data: Data of shape (n_frames, ..., n_channels). + + """ if data.shape[0] > self.size[0]: logger.warning("Buffer overflow!") with self.lock: @@ -94,6 +144,15 @@ def write(self, data): ) # set write head to the next bin to write to def read_latest(self, n_frames): + """Read latest n_frames frames from buffer, starting from write head. + + Args: + n_frames: Number of frames to read. + + Returns: + Read data. + + """ if self.size[0] < n_frames: Exception("cannot read more data than buffer length!") @@ -103,8 +162,17 @@ def read_latest(self, n_frames): )[::-1] return self.buffer[read_idcs, ...] - def read(self, n_frames, hop_frames=None): - """Reads n_frames from buffer, starting from latest read""" + def read_next(self, n_frames, hop_frames=None): + """Read n_frames frames from buffer, starting from read head. + + Args: + n_frames: Number of frames to read. + hop_frames: Read head increment. + + Returns: + Read data. 
+ + """ with self.lock: if ( np.mod(self.size[0] + self.write_head - self.read_head, self.size[0]) @@ -127,13 +195,14 @@ def read(self, n_frames, hop_frames=None): return self.buffer[read_idcs, ...] def flush(self): + """Flush buffer.""" self.buffer = np.zeros_like(self.buffer) self.write_head = 0 self.read_head = 0 class AudioProcessor: - """Class for recording and processing of multichannel audio""" + """Class for recording and processing of multichannel audio.""" def __init__( self, @@ -145,10 +214,21 @@ def __init__( f0_algorithm="YIN", out_path="", ): + """Initialize audio processing. + + Args: + fs: Sampling rate. + buf_len_sec: Buffer length in seconds. + fft_len: FFT length in bins. + channels: List of channels to record. + device_no: Index of device to record from. + f0_algorithm: F0 algorithm to use. + out_path: Output directory for F0 trajectories. + """ self.fs = fs self.buf_len_sec = buf_len_sec self.fft_len = fft_len - self.hop_len = 2 ** int(np.log2(fs / 25)) + self.hop_len = self.fft_len // 2 self.fft_freqs = np.fft.rfftfreq(self.fft_len, 1 / self.fs) self.fft_win = np.hanning(self.fft_len).reshape(-1, 1) self.channels = [0] if channels is None else channels @@ -191,10 +271,6 @@ def __init__( dtype=np.float32, ) - self.worker = threading.Thread( - target=self.worker_thread - ) # thread for computations - # initialize output files if out_path != "": start_t = datetime.now().strftime("%Y-%m-%d_%H-%M-%S") @@ -215,8 +291,7 @@ def __init__( def start_stream(self): """Start recording and processing""" - if self.is_running: - self.stop_stream() + self.stop_stream() # initialize audio stream self.stream = sounddevice.InputStream( @@ -236,14 +311,22 @@ def start_stream(self): ) self.stream.start() self.is_running = True + self.worker = threading.Thread( + target=self.worker_thread + ) # thread for computations self.worker.start() def stop_stream(self): """Stop recording and processing""" if self.is_running: self.is_running = False - self.worker.join() self.stream.stop() + self.worker.join() + self.audio_buf.flush() + self.raw_lvl_buf.flush() + self.raw_fft_buf.flush() + self.raw_f0_buf.flush() + self.raw_conf_buf.flush() def close_stream(self): """Close stream, processing thread and files""" @@ -255,17 +338,19 @@ def close_stream(self): self.traj_out_file.close() def worker_thread(self): - """The thread that does all the audio processing""" + """The thread that does the audio processing""" while self.is_running: - audio = self.audio_buf.read(self.fft_len, self.hop_len) # get audio + audio = self.audio_buf.read_next(self.fft_len, self.hop_len) # get audio if audio.size == 0: sleep(0.001) continue + start_t = time() lvl = self.compute_level(audio) # compute level fft = self.compute_fft(audio) # compute fft f0, conf = self.compute_f0(audio, lvl) # compute f0 & confidence + logger.debug(f"Processing took {time()-start_t:.4f}s.") self.raw_lvl_buf.write(lvl) self.raw_fft_buf.write(fft) @@ -290,17 +375,27 @@ def recording_callback(self, data, frames, time, status): @staticmethod def compute_level(audio): - """Peak level in dB""" + """Computes peak level in dB""" return 10 * np.log10(np.max(np.abs(audio + eps), axis=0)).reshape(1, -1) def compute_fft(self, audio): - """FFT""" + """Computes the Fast Fourier Transform (FFT)""" return np.abs(np.fft.rfft(audio * self.fft_win, self.fft_len, axis=0))[ np.newaxis, :, : ] def compute_f0(self, audio, lvl): - """Fundamental frequency estimation""" + """Fundamental frequency (F0) estimation. 
+ + Args: + audio: audio signal + lvl: audio levels + + Returns: + f0: F0 estimate. + conf: Confidence. + + """ f0 = np.zeros((1, audio.shape[1])) conf = np.zeros((1, audio.shape[1])) @@ -332,19 +427,31 @@ def compute_f0(self, audio, lvl): @staticmethod @njit - def f0_diff_computations( - f0, conf, cur_conf_threshold, cur_derivative_tol, cur_ref_freq, nan_val - ): - """Computes pair-wise differences between F0-trajectories, speed-up using jit-compilation""" + def f0_diff_computations(f0, conf, conf_threshold, gradient_tol, ref_freq, nan_val): + """Computes pair-wise differences between F0-trajectories, speed-up using jit-compilation. + + Args: + f0: Fundamental frequencies of all voices. + conf: Confidences of all voices. + conf_threshold: Confidence threshold. + gradient_tol: Tolerance for gradient filter. + ref_freq: Reference frequency. + nan_val: Value that is used in replace for NaN. + + Returns: + proc_f0: Thresholded and smoothed F0 trajectories in Cents. + proc_diff: Harmonic differences between voices in Cents. + + """ proc_f0 = np.ones_like(f0) * nan_val for i in range(f0.shape[1]): # filter f0 using confidence threshold and gradient filter - index = np.where((conf[:, i] >= cur_conf_threshold) & (f0[:, i] > 0))[0] - index_grad = gradient_filter(f0[:, i], cur_derivative_tol) + index = np.where((conf[:, i] >= conf_threshold) & (f0[:, i] > 0))[0] + index_grad = gradient_filter(f0[:, i], gradient_tol) index = np.intersect1d(index, index_grad) - proc_f0[index, i] = f2cent(f0[index, i], cur_ref_freq) + proc_f0[index, i] = f2cent(f0[index, i], ref_freq) proc_diff = ( np.ones((f0.shape[0], (f0.shape[1] * (f0.shape[1] - 1)) // 2)) * nan_val @@ -374,14 +481,42 @@ def get_gui_data( disp_t_f0, disp_t_conf, lvl_cvals, - cur_spec_scale_type, - cur_smoothing_len, - cur_conf_threshold, - cur_ref_freq_mode, - cur_ref_freq, - cur_derivative_tol, + spec_scale_type, + smoothing_len, + conf_threshold, + ref_freq_mode, + ref_freq, + gradient_tol, ): - """Reads and prepares data for GUI""" + """Reads and prepares data for GUI. + + Args: + disp_t_lvl: Time for level computation. + disp_t_spec: Time for spectrum computation. + disp_t_stft: Time for spectrogram computation. + disp_t_f0: Time for F0 computation. + disp_t_conf: Time for confidence computation. + lvl_cvals: GUI level limits. + spec_scale_type: Spectral scale type. + smoothing_len: Smoothing filter length in frames. + conf_threshold: Confidence threshold. + ref_freq_mode: Reference frequency mode. + ref_freq: Reference frequency. + gradient_tol: Gradient filter tolerance. + + Returns: + lvl: Levels for all channels. + spec: Spectra for all channels & product. + inst_f0: Instantaneous F0 for all channels & product. + stft: Spectrograms for all channels & product. + f0: F0 estimates for all channels. + diff: Differential F0s (harmonic intervals) for all channels. + + """ + start_t = time() + + # read latest data from buffer + # why not read_next()? -> we prioritize low latency over completeness of the visualized data. 
lvl = self.raw_lvl_buf.read_latest(int(np.round(disp_t_lvl * self.frame_rate))) spec_raw = self.raw_fft_buf.read_latest( int(np.round(disp_t_stft * self.frame_rate)) @@ -404,7 +539,7 @@ def get_gui_data( n_spec_frames = int(np.round(spec_raw.shape[0] * disp_t_spec / disp_t_stft)) spec = np.mean(spec_raw[-n_spec_frames:, :, :], axis=0) spec = np.concatenate((spec, np.prod(spec, axis=1).reshape(-1, 1)), axis=-1) - if cur_spec_scale_type == "log": + if spec_scale_type == "log": spec = np.log(1 + 1 * spec) max_values = np.abs(spec).max(axis=0) spec /= np.where(max_values != 0, max_values, 1) @@ -418,7 +553,7 @@ def get_gui_data( ) stft[:, :, :-1] = spec_raw stft[:, :, -1] = np.prod(spec_raw, axis=2) - if cur_spec_scale_type == "log": + if spec_scale_type == "log": stft = np.log(1 + 1 * stft) max_values = np.max(np.abs(stft), axis=(0, 1), keepdims=True) stft /= np.where(max_values != 0, max_values, 1) @@ -427,7 +562,7 @@ def get_gui_data( # preprocess f0 if len(f0) > 0: - median_len = cur_smoothing_len + median_len = smoothing_len if median_len > 0: idcs = np.argwhere(f0 > 0) f0[idcs] = median_filter(f0[idcs], size=median_len, axes=(0,)) @@ -438,28 +573,28 @@ def get_gui_data( inst_f0 = np.concatenate((inst_f0, [0])) inst_conf = np.mean(conf[-n_spec_frames:, :], axis=0) inst_conf = np.concatenate((inst_conf, [0])) - inst_f0[inst_conf < cur_conf_threshold] = np.nan + inst_f0[inst_conf < conf_threshold] = np.nan # compute reference frequency - cur_ref_freq_mode = cur_ref_freq_mode - ref_freq = cur_ref_freq - if cur_ref_freq_mode == "fixed": - cur_ref_freq = ref_freq - elif cur_ref_freq_mode == "highest": - cur_ref_freq = np.max(np.mean(f0, axis=0)) - elif cur_ref_freq_mode == "lowest": - cur_ref_freq = np.min(np.mean(f0, axis=0)) + ref_freq_mode = ref_freq_mode + ref_freq = ref_freq + if ref_freq_mode == "fixed": + ref_freq = ref_freq + elif ref_freq_mode == "highest": + ref_freq = np.max(np.mean(f0, axis=0)) + elif ref_freq_mode == "lowest": + ref_freq = np.min(np.mean(f0, axis=0)) else: - cur_ref_freq = f0[-1, int(cur_ref_freq_mode[-2:]) - 1] + ref_freq = f0[-1, int(ref_freq_mode[-2:]) - 1] # threshold trajectories and compute intervals nan_val = 99999 f0, diff = self.f0_diff_computations( f0, conf, - cur_conf_threshold, - cur_derivative_tol, - cur_ref_freq, + conf_threshold, + gradient_tol, + ref_freq, nan_val, ) f0[f0 == nan_val] = np.nan @@ -468,4 +603,6 @@ def get_gui_data( inst_f0 = np.array([]) diff = np.array([]) + logger.debug(f"GUI pre-processing took {time()-start_t:.4f}s.") + return lvl, spec, inst_f0, stft, f0, diff diff --git a/pytch/cli.py b/pytch/cli.py index 17b76c1..b29640a 100644 --- a/pytch/cli.py +++ b/pytch/cli.py @@ -26,8 +26,6 @@ def main(): else: logger.setLevel(logging.INFO) - logger.debug("starting app...") - start_gui() diff --git a/pytch/gui.py b/pytch/gui.py index 7ad1d7c..f946e3f 100644 --- a/pytch/gui.py +++ b/pytch/gui.py @@ -6,6 +6,7 @@ import sys import numpy as np import importlib.metadata +from time import time from .gui_utils import FloatQLineEdit, QHLine, disable_interactivity, colors from .audio import AudioProcessor, get_input_devices, get_fs_options @@ -298,12 +299,12 @@ def refresh_gui(self): disp_t_f0=self.disp_t_f0, disp_t_conf=self.disp_t_conf, lvl_cvals=self.lvl_cvals, - cur_spec_scale_type=self.cur_spec_scale_type, - cur_smoothing_len=self.cur_smoothing_len, - cur_conf_threshold=self.cur_conf_threshold, - cur_ref_freq_mode=self.cur_ref_freq_mode, - cur_ref_freq=self.cur_ref_freq, - cur_derivative_tol=self.cur_derivative_tol, + 
spec_scale_type=self.cur_spec_scale_type, + smoothing_len=self.cur_smoothing_len, + conf_threshold=self.cur_conf_threshold, + ref_freq_mode=self.cur_ref_freq_mode, + ref_freq=self.cur_ref_freq, + gradient_tol=self.cur_derivative_tol, ) # update widgets @@ -729,7 +730,7 @@ def on_draw(self, lvl, spec, inst_f0, stft): if len(lvl) > 0 and not self.is_product: self.level_refresh_signal.emit(lvl[idx]) - if len(spec) > 0: + if len(spec) > 0 and len(inst_f0) > 0: self.spectrum_refresh_signal.emit(spec[:, idx], inst_f0[idx]) if len(stft) > 0: @@ -796,6 +797,7 @@ def __init__(self, main_window: MainWindow, has_xlabel=True): @qc.pyqtSlot(float) def on_draw(self, lvl): """Updates the image with new data.""" + start_t = time() lvl_conv = self.lvl_converter(lvl) plot_array = np.linspace( 0, lvl_conv, int(lvl_conv * np.abs(self.main_window.lvl_cvals[0])) @@ -806,6 +808,7 @@ def on_draw(self, lvl): self.img.setImage(plot_array) self.img.setLevels([0, 1]) + logger.debug(f"Lvl update took {time()-start_t:.4f}s.") class SpectrumWidget(pg.GraphicsLayoutWidget): @@ -864,10 +867,12 @@ def on_disp_freq_lims_changed(self, disp_freq_lims): @qc.pyqtSlot(object, float) def on_draw(self, data_plot, inst_f0=None): """Updates the spectrum and the fundamental frequency line.""" + start_t = time() self._line.setData(x=self.f_axis, y=data_plot) # Update the spectrum line if inst_f0 is not None: self._inst_f0_line.setPos(inst_f0) # Update the fundamental frequency line + logger.debug(f"Spectrum update took {time() - start_t:.4f}s.") class SpectrogramWidget(pg.GraphicsLayoutWidget): @@ -932,6 +937,7 @@ def on_disp_freq_lims_changed(self, disp_freq_lims): @qc.pyqtSlot(object) def on_draw(self, data_plot): """Updates the spectrogram with new data.""" + start_t = time() self.img.setImage(data_plot.T, autoLevels=False) self.img.setRect( qc.QRectF( @@ -941,6 +947,7 @@ def on_draw(self, data_plot): self.default_spec.shape[0], ) ) + logger.debug(f"Spectrogram update took {time() - start_t:.4f}s.") class TrajectoryViews(qw.QTabWidget): @@ -984,12 +991,15 @@ def __init__(self, main_window: MainWindow, *args, **kwargs): @qc.pyqtSlot(object, object) def on_draw(self, f0, diff): + start_t = time() if len(f0) > 0: self.pitch_view.on_draw(f0) if len(self.main_window.channels) > 1 and len(diff) > 0: self.pitch_diff_view.on_draw(diff) + logger.debug(f"Trajectory view update took {time() - start_t:.4f}s.") + def on_disp_pitch_lims_changed(self, disp_pitch_lims): self.change_pitch_lims(self.pitch_view, disp_pitch_lims) if len(self.main_window.channels) > 1: diff --git a/pytch/gui_utils.py b/pytch/gui_utils.py index 1355b9b..f14b73d 100644 --- a/pytch/gui_utils.py +++ b/pytch/gui_utils.py @@ -26,7 +26,13 @@ class FloatQLineEdit(qw.QLineEdit): accepted_value = qc.pyqtSignal(float) def __init__(self, default=None, *args, **kwargs): - qw.QLineEdit.__init__(self, *args, **kwargs) + """Initialization. + + Args: + default: Default value. + + """ + qw.QLineEdit.__init__(self) self.setValidator(qg.QDoubleValidator()) self.setFocusPolicy(qc.Qt.FocusPolicy.ClickFocus | qc.Qt.FocusPolicy.TabFocus) self.returnPressed.connect(self.do_check) @@ -57,6 +63,12 @@ def __init__(self): def disable_interactivity(plot_item): + """Disables interactive elements, like zooming or context menus, for given plot. + + Args: + plot_item: PyQt PlotItem. 
+ + """ plot_item.setMouseEnabled(x=False, y=False) # Disable mouse panning & zooming plot_item.hideButtons() # Disable corner auto-scale button plot_item.setMenuEnabled(False) # Disable right-click context menu diff --git a/test/test_buffer.py b/test/test_buffer.py index f2280d1..499f35e 100644 --- a/test/test_buffer.py +++ b/test/test_buffer.py @@ -12,6 +12,6 @@ def test_ring_buffer(): ) buf.write(test_data) assert np.all(buf.read_latest(20) == test_data) - assert np.all(buf.read(10, 10) == test_data[:10, :, :][::-1, :, :]) - assert np.all(buf.read(10, 10) == test_data[10:20, :, :][::-1, :, :]) - assert buf.read(10, 10).size == 0 + assert np.all(buf.read_next(10, 10) == test_data[:10, :, :][::-1, :, :]) + assert np.all(buf.read_next(10, 10) == test_data[10:20, :, :][::-1, :, :]) + assert buf.read_next(10, 10).size == 0 From d020ad945dbf35fc6d6c7feac7c7088d9d0a601c Mon Sep 17 00:00:00 2001 From: sebastianrosenzweig Date: Tue, 1 Jul 2025 20:09:15 +0200 Subject: [PATCH 27/27] more docstrings --- pytch/audio.py | 2 +- pytch/cli.py | 1 + pytch/gui.py | 396 +++++++++++++++++++++++++++++++++++---------- pytch/gui_utils.py | 2 +- 4 files changed, 314 insertions(+), 87 deletions(-) diff --git a/pytch/audio.py b/pytch/audio.py index e020764..4363c7e 100644 --- a/pytch/audio.py +++ b/pytch/audio.py @@ -363,7 +363,7 @@ def worker_thread(self): writer.writerow(np.concatenate((f0[0, :], conf[0, :]))) def recording_callback(self, data, frames, time, status): - """Receives and stores frames from soundcard, data is of shape (frames, channels)""" + """Receives frames from soundcard and stores them in buffer, data is of shape (frames, channels)""" audio_conv = ( data[:, self.channels].astype(np.float32, order="C") / 32768.0 ) # convert int16 to float32 diff --git a/pytch/cli.py b/pytch/cli.py index b29640a..f26b3b7 100644 --- a/pytch/cli.py +++ b/pytch/cli.py @@ -9,6 +9,7 @@ def main(): + """Parses commandline arguments and starts pytch.""" parser = argparse.ArgumentParser("pytch") parser.add_argument( "--debug", diff --git a/pytch/gui.py b/pytch/gui.py index f946e3f..9660bc9 100644 --- a/pytch/gui.py +++ b/pytch/gui.py @@ -21,7 +21,7 @@ def start_gui(): - """Starts the GUI, first show input menu, then open the main GUI""" + """Starts the GUI, first show input menu, then open the main GUI.""" app = qw.QApplication(sys.argv) input_dialog = InputMenu() if input_dialog.exec() == qw.QDialog.DialogCode.Accepted: @@ -39,10 +39,10 @@ def start_gui(): class InputMenu(qw.QDialog): - """Pop up menu at program start that offers user to customise input settings""" + """Pop up menu at program start that offers user to customise input settings.""" - def __init__(self, *args, **kwargs): - qw.QDialog.__init__(self, *args, **kwargs) + def __init__(self): + qw.QDialog.__init__(self) self.setModal(True) layout = qw.QGridLayout() @@ -97,7 +97,7 @@ def __init__(self, *args, **kwargs): self.update_channel_info(0) def update_channel_info(self, menu_index): - """Updates available channels in input menu""" + """Updates available channels in input menu.""" sounddevice_index, device = self.devices[menu_index] nmax_channels = device["max_input_channels"] @@ -113,25 +113,39 @@ def update_channel_info(self, menu_index): @staticmethod def get_nfft_box(): - """Return a qw.QSlider for modifying FFT width""" + """Menu for choosing the FFT length. + + Returns: + FFT qw.QSlider. 
+ + """ b = qw.QComboBox() b.addItems([str(f * 256) for f in [1, 2, 4, 8, 16]]) b.setCurrentIndex(2) return b def open_dir_dialog(self): - """Opens an os dialogue for selecting a directory""" + """Opens an os dialogue for selecting a directory.""" dir_name = qw.QFileDialog.getExistingDirectory(self, "Select a Directory") if dir_name: self.out_path = str(dir_name) self.dir_name_edit.setText(self.out_path) def on_ok_clicked(self): - """Close the menu when the user clicks ok and signal that main GUI can be opened""" + """Close the menu when the user clicks ok and signal that main GUI can be opened.""" self.accept() def get_input_settings(self): - """Returns user-configured input settings""" + """Collects and returns user-configured input settings. + + Returns: + sounddevice_idx: Index of the chosen sounddevice. + channels: List of selected channels. + fs: Chosen sampling rate. + fft_size: Chosen FFt size. + out_path: Chosen output path. + + """ sounddevice_idx = self.devices[self.input_options.currentIndex()][0] channels = self.channel_selector.get_selected_channels() fs = int(self.fs_options.currentText()) @@ -140,9 +154,15 @@ def get_input_settings(self): class ChannelSelector(qw.QWidget): - """Widget for the channel buttons on the right side of the input menu""" + """Widget for the channel buttons on the right side of the input menu.""" def __init__(self, n_channels, menu_buttons): + """Initialization function. + + Args: + n_channels: Number of channels to choose from. + menu_buttons: Buttons of the main menu. + """ super().__init__() self.setLayout(qw.QVBoxLayout()) @@ -159,7 +179,12 @@ def __init__(self, n_channels, menu_buttons): self.layout().addWidget(button) def get_selected_channels(self): - """Returns selected channels by the user in order""" + """Returns user-selected channels in order of selection. + + Returns: + Selected channels. + + """ return self.press_order def track_button_press(self, index): @@ -178,6 +203,15 @@ class MainWindow(qw.QMainWindow): """Main window that includes the main widget for the menu and all visualizations.""" def __init__(self, sounddevice_idx, channels, fs, fft_size, out_path): + """Initialization. + + Args: + sounddevice_idx: Index of the chosen sound device. + channels: List of chosen channels. + fs: Chosen sampling rate. + fft_size: Chosen FFT size. + out_path: Chosen output path. + """ super().__init__() # default settings for the entire GUI. 
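The body of track_button_press lies outside the hunk above; the stand-alone sketch below illustrates one way such press-order bookkeeping can work so that get_selected_channels returns channels in the order they were clicked. The class name and the checked argument are illustrative stand-ins, not code from pytch.

class PressOrderTracker:
    """Minimal stand-in that records the toggle order of indexed buttons."""

    def __init__(self):
        self.press_order = []

    def track_button_press(self, index, checked):
        # append on check, remove on uncheck, preserving click order
        if checked and index not in self.press_order:
            self.press_order.append(index)
        elif not checked and index in self.press_order:
            self.press_order.remove(index)

tracker = PressOrderTracker()
tracker.track_button_press(2, True)
tracker.track_button_press(0, True)
tracker.track_button_press(2, False)
print(tracker.press_order)  # [0]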
@@ -188,15 +222,15 @@ def __init__(self, sounddevice_idx, channels, fs, fft_size, out_path): self.fft_size = fft_size self.out_path = out_path self.f0_algorithms = ["YIN"] - self.buf_len_sec = 30.0 + self.buf_len_sec = 30.0 # sec self.spec_scale_types = ["log", "linear"] self.ref_freq_modes = ["fixed", "highest", "lowest"] - self.disp_t_lvl = 1 - self.disp_t_spec = 1 - self.disp_t_stft = 5 - self.disp_t_f0 = 10 - self.disp_t_conf = 10 - self.lvl_cvals = [-80, -12, 0] + self.disp_t_lvl = 1 # sec + self.disp_t_spec = 1 # sec + self.disp_t_stft = 5 # sec + self.disp_t_f0 = 10 # sec + self.disp_t_conf = 10 # sec + self.lvl_cvals = [-80, -12, 0] # dBFS self.lvl_colors = ["green", "yellow", "red"] self.ch_colors = colors self.cur_disp_freq_lims = [ @@ -209,10 +243,10 @@ def __init__(self, sounddevice_idx, channels, fs, fft_size, out_path): ] # limits in cents for pitch trajectory view self.cur_spec_scale_type = self.spec_scale_types[0] self.cur_ref_freq_mode = self.ref_freq_modes[0] - self.cur_ref_freq = 220 + self.cur_ref_freq = 220 # Hz self.cur_conf_threshold = 0.5 - self.cur_derivative_tol = 600 - self.cur_smoothing_len = 3 + self.cur_gradient_tol = 600 # Cents + self.cur_smoothing_len = 3 # bins self.gui_refresh_ms = int(np.round(1000 / 60)) # 60 fps # status variables @@ -275,7 +309,7 @@ def __init__(self, sounddevice_idx, channels, fs, fft_size, out_path): self.play_pause() # start recording and plotting def play_pause(self): - """Starts or stops the GUI""" + """Starts or stops the GUI.""" if self.is_running: self.audio_processor.stop_stream() self.refresh_timer.stop() @@ -289,7 +323,7 @@ def play_pause(self): @qc.pyqtSlot() def refresh_gui(self): - """GUI refresh function, needs to be as fast as possible""" + """GUI refresh function, needs to be as fast as possible.""" # get preprocessed audio data from audio processor lvl, spec, inst_f0, stft, f0, diff = self.audio_processor.get_gui_data( @@ -304,7 +338,7 @@ def refresh_gui(self): conf_threshold=self.cur_conf_threshold, ref_freq_mode=self.cur_ref_freq_mode, ref_freq=self.cur_ref_freq, - gradient_tol=self.cur_derivative_tol, + gradient_tol=self.cur_gradient_tol, ) # update widgets @@ -312,7 +346,7 @@ def refresh_gui(self): self.trajectory_views.on_draw(f0, diff) def menu_toggle_button(self): - """The button for toggeling the menu""" + """The button for toggeling the menu.""" top_bar = qw.QHBoxLayout() top_bar.setContentsMargins(0, 0, 0, 0) top_bar.setSpacing(0) @@ -327,7 +361,7 @@ def menu_toggle_button(self): return top_bar def toggle_menu(self): - """Make menu appear or disappear""" + """Make menu appear or disappear.""" if self.menu_visible: self.menu.hide() self.toggle_button.setText("☰ Show Menu") @@ -337,7 +371,7 @@ def toggle_menu(self): self.menu_visible = not self.menu_visible def closeEvent(self, a0): - """Clean up when GUI is closed""" + """Clean up when GUI is closed.""" self.refresh_timer.stop() self.audio_processor.stop_stream() self.audio_processor.close_stream() @@ -345,10 +379,16 @@ def closeEvent(self, a0): class ProcessingMenu(qw.QFrame): - """The processing menu on the left side of the main window""" + """The processing menu on the left side of the main window.""" + + def __init__(self, main_window: MainWindow): + """Initialization. - def __init__(self, main_window: MainWindow, *args, **kwargs): - qw.QFrame.__init__(self, *args, **kwargs) + Args: + main_window: qw.QMainWindow instance. 
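The refresh loop configured earlier in this hunk runs off a Qt timer at roughly 60 fps (gui_refresh_ms of about 16 ms). The sketch below shows the bare QTimer pattern with a placeholder callback, assuming PyQt6 bindings (suggested by the enum-style attribute access used in this file); it is independent of the pytch classes.

import numpy as np
from PyQt6 import QtCore as qc, QtWidgets as qw

app = qw.QApplication([])
gui_refresh_ms = int(np.round(1000 / 60))  # ~16 ms per frame, as in MainWindow

def refresh_gui():
    print("refresh tick")  # placeholder for fetching data and updating widgets

timer = qc.QTimer()
timer.timeout.connect(refresh_gui)
timer.start(gui_refresh_ms)
qc.QTimer.singleShot(100, app.quit)  # end the demo after a few ticks
app.exec()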
+ + """ + qw.QFrame.__init__(self) self.main_window = main_window @@ -403,17 +443,13 @@ def __init__(self, main_window: MainWindow, *args, **kwargs): layout.addWidget(self.box_show_products, 4, 1, 1, 1) layout.addWidget(qw.QLabel("Minimum Frequency"), 5, 0) - self.freq_min = FloatQLineEdit( - parent=self, default=main_window.cur_disp_freq_lims[0] - ) + self.freq_min = FloatQLineEdit(default=main_window.cur_disp_freq_lims[0]) layout.addWidget(self.freq_min, 5, 1, 1, 1) self.freq_min.accepted_value.connect(self.on_min_freq_changed) layout.addWidget(qw.QLabel("Hz"), 5, 2) layout.addWidget(qw.QLabel("Maximum Frequency"), 6, 0) - self.freq_max = FloatQLineEdit( - parent=self, default=main_window.cur_disp_freq_lims[1] - ) + self.freq_max = FloatQLineEdit(default=main_window.cur_disp_freq_lims[1]) layout.addWidget(self.freq_max, 6, 1, 1, 1) self.freq_max.accepted_value.connect(self.on_max_freq_changed) layout.addWidget(qw.QLabel("Hz"), 6, 2) @@ -469,14 +505,14 @@ def __init__(self, main_window: MainWindow, *args, **kwargs): layout.addWidget(self.smoothing_label, 13, 2) layout.addWidget(qw.QLabel("Pitchslide Tolerance [Cents]"), 14, 0) - self.derivative_tol_slider = qw.QSlider() - self.derivative_tol_slider.setRange(0, 1200) - self.derivative_tol_slider.setValue(main_window.cur_derivative_tol) - self.derivative_tol_slider.setOrientation(qc.Qt.Orientation.Horizontal) - self.derivative_tol_label = qw.QLabel(f"{self.derivative_tol_slider.value()}") - self.derivative_tol_slider.valueChanged.connect(self.on_derivative_tol_changed) - layout.addWidget(self.derivative_tol_label, 14, 2) - layout.addWidget(self.derivative_tol_slider, 14, 1, 1, 1) + self.gradient_tol_slider = qw.QSlider() + self.gradient_tol_slider.setRange(0, 1200) + self.gradient_tol_slider.setValue(main_window.cur_gradient_tol) + self.gradient_tol_slider.setOrientation(qc.Qt.Orientation.Horizontal) + self.gradient_tol_label = qw.QLabel(f"{self.gradient_tol_slider.value()}") + self.gradient_tol_slider.valueChanged.connect(self.on_gradient_tol_changed) + layout.addWidget(self.gradient_tol_label, 14, 2) + layout.addWidget(self.gradient_tol_slider, 14, 1, 1, 1) layout.addWidget(qw.QLabel("Reference Mode"), 15, 0) self.ref_freq_mode_menu = qw.QComboBox() @@ -491,23 +527,19 @@ def __init__(self, main_window: MainWindow, *args, **kwargs): layout.addWidget(self.ref_freq_mode_menu, 15, 1, 1, 1) layout.addWidget(qw.QLabel("Reference Frequency"), 16, 0) - self.freq_box = FloatQLineEdit(parent=self, default=main_window.cur_ref_freq) + self.freq_box = FloatQLineEdit(default=main_window.cur_ref_freq) self.freq_box.accepted_value.connect(self.on_reference_frequency_changed) layout.addWidget(self.freq_box, 16, 1, 1, 1) layout.addWidget(qw.QLabel("Hz"), 16, 2) layout.addWidget(qw.QLabel("Minimum Pitch"), 17, 0) - self.pitch_min = FloatQLineEdit( - parent=self, default=main_window.cur_disp_pitch_lims[0] - ) + self.pitch_min = FloatQLineEdit(default=main_window.cur_disp_pitch_lims[0]) self.pitch_min.accepted_value.connect(self.on_pitch_min_changed) layout.addWidget(self.pitch_min, 17, 1, 1, 1) layout.addWidget(qw.QLabel("Cents"), 17, 2) layout.addWidget(qw.QLabel("Maximum Pitch"), 18, 0) - self.pitch_max = FloatQLineEdit( - parent=self, default=main_window.cur_disp_pitch_lims[1] - ) + self.pitch_max = FloatQLineEdit(default=main_window.cur_disp_pitch_lims[1]) self.pitch_max.accepted_value.connect(self.on_pitch_max_changed) layout.addWidget(self.pitch_max, 18, 1, 1, 1) layout.addWidget(qw.QLabel("Cents"), 18, 2) @@ -516,33 +548,40 @@ def __init__(self, 
main_window: MainWindow, *args, **kwargs): main_layout.addWidget(settings, 3, 0, 1, 2) def on_min_freq_changed(self, f): + """Update function for minimum frequency on user interaction.""" self.main_window.cur_disp_freq_lims[0] = int(f) self.main_window.channel_views.on_disp_freq_lims_changed( self.main_window.cur_disp_freq_lims ) def on_max_freq_changed(self, f): + """Update function for maximum frequency on user interaction.""" self.main_window.cur_disp_freq_lims[1] = int(f) self.main_window.channel_views.on_disp_freq_lims_changed( self.main_window.cur_disp_freq_lims ) def on_algorithm_select(self, algorithm): + """Update function for F0 algorithm on user interaction.""" self.main_window.audio_processor.f0_algorithm = algorithm def on_conf_threshold_changed(self, val): + """Update function for confidence threshold on user interaction.""" self.noise_thresh_label.setText(str(val / 10.0)) self.main_window.cur_conf_threshold = val / 10.0 def on_conf_smoothing_changed(self, val): + """Update function for smoothing filter length on user interaction.""" self.smoothing_label.setText(str(val)) self.main_window.cur_smoothing_len = val - def on_derivative_tol_changed(self, val): - self.derivative_tol_label.setText(str(val)) - self.main_window.cur_derivative_tol = val + def on_gradient_tol_changed(self, val): + """Update function for gradient filter tolerance on user interaction.""" + self.gradient_tol_label.setText(str(val)) + self.main_window.cur_gradient_tol = val def on_reference_frequency_mode_changed(self, text): + """Update function for reference frequency mode on user interaction.""" if (text == "Highest") or (text == "Lowest") or ("Channel" in text): self.freq_box.setReadOnly(True) else: @@ -553,33 +592,43 @@ def on_reference_frequency_mode_changed(self, text): self.main_window.cur_ref_freq_mode = text def on_reference_frequency_changed(self, val): + """Update function for reference frequency on user interaction.""" self.main_window.cur_ref_freq = val def on_pitch_min_changed(self, val): + """Update function for minimum pitch limit on user interaction.""" self.main_window.cur_disp_pitch_lims[0] = int(val) self.main_window.trajectory_views.on_disp_pitch_lims_changed( self.main_window.cur_disp_pitch_lims ) def on_pitch_max_changed(self, val): + """Update function for maximum pitch limit on user interaction.""" self.main_window.cur_disp_pitch_lims[1] = int(val) self.main_window.trajectory_views.on_disp_pitch_lims_changed( self.main_window.cur_disp_pitch_lims ) def on_spectrum_type_select(self, arg): + """Update function for spectrum type on user interaction.""" self.main_window.cur_spec_scale_type = arg def sizeHint(self): + """Size hint.""" return qc.QSize(100, 200) class ChannelViews(qw.QWidget): - """The central widget of the GUI that contains all channel views""" + """The central widget of the GUI that contains all channel views.""" refresh_signal = qc.pyqtSignal(np.ndarray, np.ndarray, np.ndarray, np.ndarray) def __init__(self, main_window: MainWindow): + """Initialization. + + Args: + main_window: qw.QMainWindow instance. 
+ """ qw.QWidget.__init__(self) self.layout = qw.QVBoxLayout() self.layout.setSpacing(0) @@ -590,8 +639,8 @@ def __init__(self, main_window: MainWindow): self.views.append( ChannelView( main_window=main_window, - ch_id=ch_id, - orig_ch=orig_ch + 1, + soundcard_ch_id=ch_id, + disp_channel_id=orig_ch + 1, is_product=False, has_xlabel=False, ) @@ -615,45 +664,91 @@ def __init__(self, main_window: MainWindow): self.show_spectrogram_widgets(True) def show_level_widgets(self, show): + """Change visibility of level widgets. + + Args: + show: True for visible, False for invisible. + """ for view in self.views: view.show_level_widget(show) def show_spectrum_widgets(self, show): + """Change visibility of spectrum widgets. + + Args: + show: True for visible, False for invisible. + """ for view in self.views: view.show_spectrum_widget(show) def show_spectrogram_widgets(self, show): + """Change visibility of spectrogram widgets. + + Args: + show: True for visible, False for invisible. + """ for view in self.views: view.show_spectrogram_widget(show) def show_product_widgets(self, show): + """Change visibility of product widgets. + + Args: + show: True for visible, False for invisible. + """ self.views[-1].setVisible(show) self.h_line.setVisible(show) def on_disp_freq_lims_changed(self, disp_freq_lims): + """Changes frequency limits. + + Args: + disp_freq_lims: New frequency limits. + """ for view in self.views: view.on_disp_freq_lims_changed(disp_freq_lims) @qc.pyqtSlot() def on_draw(self, lvl, spec, inst_f0, stft): + """Trigger channel views refresh. + + Args: + lvl: New level. + spec: New spectrum. + inst_f0: New instantaneous frequency. + stft: New spectrogram. + + """ self.refresh_signal.emit(lvl, spec, inst_f0, stft) def sizeHint(self): + """Size hint.""" return qc.QSize(400, 200) def __iter__(self): + """Helper to enable iteration through channel views.""" yield from self.views class ChannelLabel(qw.QWidget): - """Widget that contains the vertical channel label""" + """Widget that contains the vertical channel label.""" def __init__(self, text): + """Initialization. + + Args: + text: The channel name. + """ super().__init__() self.text = text def paintEvent(self, event): - """Paints the label and updates it when necessary, e.g. when available space changes""" + """Paints the label and updates it when necessary, e.g. when available space changes. + + Args: + event: Trigger event. + + """ painter = qg.QPainter(self) painter.setFont(qg.QFont("Arial", 13, qg.QFont.Weight.Bold)) painter.setPen(qg.QColor("black")) @@ -669,7 +764,7 @@ def paintEvent(self, event): class ChannelView(qw.QWidget): """Widget that contains a channel label, level, spectrum and spectrogram, - a.k.a. one row of the central GUI widget + a.k.a. one row of the central GUI widget. """ level_refresh_signal = qc.pyqtSignal(float) @@ -679,25 +774,38 @@ class ChannelView(qw.QWidget): def __init__( self, main_window: MainWindow, - ch_id=None, - orig_ch=None, + soundcard_ch_id=None, + disp_channel_id=None, is_product=False, has_xlabel=True, - *args, - **kwargs, ): - qw.QWidget.__init__(self, *args, **kwargs) + """Initialization. + + Args: + main_window: Main pytch window. + soundcard_ch_id: Soundcard channel ID or None. + disp_channel_id: Display channel ID or None. + is_product: Bool that indicates whether channel view is for the product channel. + has_xlabel: Bool that indicates whether channel view should have x labels on plots. 
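The widgets above pass data around via Qt signals that carry NumPy arrays. The self-contained sketch below shows that signal/slot pattern in isolation, assuming PyQt6; Producer and Consumer are illustrative names, not pytch classes.

import numpy as np
from PyQt6 import QtCore as qc

app = qc.QCoreApplication([])

class Producer(qc.QObject):
    refresh_signal = qc.pyqtSignal(np.ndarray)  # signal carrying one array

    def produce(self):
        self.refresh_signal.emit(np.arange(4, dtype=float))

class Consumer(qc.QObject):
    @qc.pyqtSlot(object)
    def on_draw(self, data):
        print("received", data)

producer, consumer = Producer(), Consumer()
producer.refresh_signal.connect(consumer.on_draw)
producer.produce()  # prints: received [0. 1. 2. 3.]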
+ """ + qw.QWidget.__init__(self) self.layout = qw.QHBoxLayout() self.layout.setSpacing(0) # keep GUI tight, remove frames around widgets self.layout.setContentsMargins(0, 0, 0, 0) self.main_window = main_window - self.color = "black" if ch_id is None else main_window.ch_colors[ch_id] + self.color = ( + "black" + if soundcard_ch_id is None + else main_window.ch_colors[soundcard_ch_id] + ) self.is_product = is_product - self.ch_id = ch_id + self.ch_id = soundcard_ch_id # channel label - label = ChannelLabel("Product" if ch_id is None else f"Channel {orig_ch}") + label = ChannelLabel( + "Product" if soundcard_ch_id is None else f"Channel {disp_channel_id}" + ) self.level_widget = LevelWidget(self.main_window, has_xlabel=has_xlabel) self.spectrogram_widget = SpectrogramWidget( @@ -724,7 +832,15 @@ def __init__( @qc.pyqtSlot(object, object, object, object) def on_draw(self, lvl, spec, inst_f0, stft): - """Refreshes all widgets as fast as possible""" + """Refreshes all widgets with new data. + + Args: + lvl: New level data. + spec: New spectrum data. + inst_f0: New instantaneous frequency data. + stft: New spectrogram data. + + """ idx = -1 if self.is_product else self.ch_id if len(lvl) > 0 and not self.is_product: @@ -737,15 +853,36 @@ def on_draw(self, lvl, spec, inst_f0, stft): self.spectrogram_refresh_signal.emit(stft[:, :, idx]) def show_spectrum_widget(self, show): + """Change visibility of spectrum widget. + + Args: + show: True for visible, False for invisible. + """ self.spectrum_widget.setVisible(show) def show_spectrogram_widget(self, show): + """Change visibility of spectrogram widget. + + Args: + show: True for visible, False for invisible. + """ self.spectrogram_widget.setVisible(show) def show_level_widget(self, show): + """Change visibility of level widget. + + Args: + show: True for visible, False for invisible. + """ self.level_widget.setVisible(show) def on_disp_freq_lims_changed(self, disp_freq_lims): + """Change frequency axis limits of spectrum and spectrogram. + + Args: + disp_freq_lims: New frequency limits. + + """ self.spectrum_widget.on_disp_freq_lims_changed(disp_freq_lims) self.spectrogram_widget.on_disp_freq_lims_changed(disp_freq_lims) @@ -754,6 +891,12 @@ class LevelWidget(pg.GraphicsLayoutWidget): """The level meter with color-coded dB levels""" def __init__(self, main_window: MainWindow, has_xlabel=True): + """Initialization. + + Args: + main_window: Main pytch window. + has_xlabel: Bool that indicates whether plot has x label. + """ super(LevelWidget, self).__init__() self.main_window = main_window @@ -796,7 +939,12 @@ def __init__(self, main_window: MainWindow, has_xlabel=True): @qc.pyqtSlot(float) def on_draw(self, lvl): - """Updates the image with new data.""" + """Updates the image with new data. + + Args: + lvl: New audio level. + + """ start_t = time() lvl_conv = self.lvl_converter(lvl) plot_array = np.linspace( @@ -812,9 +960,17 @@ def on_draw(self, lvl): class SpectrumWidget(pg.GraphicsLayoutWidget): - """Spectrum plot with current fundamental frequency as dashed line""" + """Spectrum plot with current fundamental frequency as dashed line.""" def __init__(self, main_window: MainWindow, color, has_xlabel=True): + """Initialization. + + Args: + main_window: Main pytch window. + color: Color to use for this widget. + has_xlabel: Bool that indicates whether plot has x label. 
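lvl_converter itself is not shown in this hunk. The helper below is a plausible stand-in that maps a dBFS level onto the 0..1 range consumed by the meter image, using the -80 dBFS floor from lvl_cvals; treat the exact mapping as an assumption rather than pytch's implementation.

import numpy as np

def lvl_to_unit_range(lvl_dbfs, floor_dbfs=-80.0):
    # clip to the displayed range and rescale linearly to 0..1 (assumed mapping)
    return float(np.clip((lvl_dbfs - floor_dbfs) / -floor_dbfs, 0.0, 1.0))

for lvl in (-90.0, -80.0, -12.0, 0.0):
    print(lvl, "->", round(lvl_to_unit_range(lvl), 3))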
+ + """ super(SpectrumWidget, self).__init__() self.main_window = main_window @@ -879,6 +1035,14 @@ class SpectrogramWidget(pg.GraphicsLayoutWidget): """Spectrogram widget""" def __init__(self, main_window: MainWindow, color, has_xlabel=True): + """Initialization. + + Args: + main_window: Main pytch window. + color: Color to use for this widget. + has_xlabel: Bool that indicates whether plot has x label. + + """ super().__init__() self.main_window = main_window @@ -936,7 +1100,12 @@ def on_disp_freq_lims_changed(self, disp_freq_lims): @qc.pyqtSlot(object) def on_draw(self, data_plot): - """Updates the spectrogram with new data.""" + """Updates the spectrogram data. + + Args: + data_plot: New spectrogram. + + """ start_t = time() self.img.setImage(data_plot.T, autoLevels=False) self.img.setRect( @@ -951,10 +1120,15 @@ def on_draw(self, data_plot): class TrajectoryViews(qw.QTabWidget): - """Right-hand widget that contains the visualization of the F0-trajectories and the differential""" + """Right-hand widget that contains the visualization of the F0-trajectories and the differential.""" + + def __init__(self, main_window: MainWindow): + """Initialization. - def __init__(self, main_window: MainWindow, *args, **kwargs): - qw.QTabWidget.__init__(self, *args, **kwargs) + Args: + main_window: Main pytch window. + """ + qw.QTabWidget.__init__(self) self.main_window = main_window @@ -991,22 +1165,46 @@ def __init__(self, main_window: MainWindow, *args, **kwargs): @qc.pyqtSlot(object, object) def on_draw(self, f0, diff): + """Update pitch and pitch differences view. Only update selected. + + Args: + f0: New F0 trajectories. + diff: New pitch differences. + + """ start_t = time() - if len(f0) > 0: + if len(f0) > 0 and self.currentIndex() == 0: self.pitch_view.on_draw(f0) - if len(self.main_window.channels) > 1 and len(diff) > 0: + if ( + len(self.main_window.channels) > 1 + and len(diff) > 0 + and self.currentIndex() == 1 + ): self.pitch_diff_view.on_draw(diff) logger.debug(f"Trajectory view update took {time() - start_t:.4f}s.") def on_disp_pitch_lims_changed(self, disp_pitch_lims): + """Update pitch limits on user interaction. + + Args: + disp_pitch_lims: New pitch limits. + + """ self.change_pitch_lims(self.pitch_view, disp_pitch_lims) if len(self.main_window.channels) > 1: self.change_pitch_lims(self.pitch_diff_view, disp_pitch_lims) @staticmethod def change_pitch_lims(view, disp_pitch_lims): + """Update pitch limits of given view. + + Args: + view: Pitch or Differences view. + disp_pitch_lims: New pitch limits. + + """ # Set the x-axis range view.plot_item.setXRange(0, len(view.t_axis)) @@ -1018,6 +1216,12 @@ def change_pitch_lims(view, disp_pitch_lims): view.plot_item.getAxis("left").setTicks([[(y, str(y)) for y in y_ticks]]) def show_trajectory_views(self, show): + """Change visibility of trajectory views. + + Args: + show: Bool, True indicates visible, False invisible. + + """ self.setVisible(show) def sizeHint(self): @@ -1026,12 +1230,18 @@ def sizeHint(self): class PitchWidget(pg.GraphicsLayoutWidget): - """Visualization of the F0-trajectories of each channel""" + """Visualization of the F0-trajectories of each channel.""" low_pitch_changed = qc.pyqtSignal(np.ndarray) - def __init__(self, main_window: MainWindow, *args, **kwargs): - super(PitchWidget, self).__init__(*args, **kwargs) + def __init__(self, main_window: MainWindow): + """Initialization. + + Args: + main_window: Main pytch window. 
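Pitch limits and the reference frequency in this hunk are expressed in cents. For reference, the standard conversion from a frequency in Hz to cents relative to a reference is 1200 * log2(f / f_ref); the 220 Hz default below mirrors cur_ref_freq, while the example frequencies are arbitrary.

import numpy as np

def hz_to_cents(f0_hz, ref_hz=220.0):
    # 1200 cents per octave relative to the reference frequency
    return 1200.0 * np.log2(np.asarray(f0_hz, dtype=float) / ref_hz)

print(hz_to_cents([220.0, 440.0, 233.08]))  # approx. [0., 1200., 100.]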
+ + """ + super(PitchWidget, self).__init__() self.main_window = main_window self.channel_views = main_window.channel_views.views[:-1] @@ -1079,17 +1289,28 @@ def __init__(self, main_window: MainWindow, *args, **kwargs): @qc.pyqtSlot(object) def on_draw(self, f0): - """Updates the F0 trajectories for each channel.""" + """Updates the F0 trajectories for each channel. + + Args: + f0: New F0 trajectories. + + """ if len(f0) > 0: for i in range(f0.shape[1]): self._lines[i].setData(self.t_axis, f0[:, i]) # Update the line data class DifferentialPitchWidget(pg.GraphicsLayoutWidget): - """Visualization of the pair-wise F0-differences""" + """Visualization of the pair-wise F0 differences.""" + + def __init__(self, main_window: MainWindow): + """Initialization. + + Args: + main_window: Main pytch window. - def __init__(self, main_window: MainWindow, *args, **kwargs): - super(DifferentialPitchWidget, self).__init__(*args, **kwargs) + """ + super(DifferentialPitchWidget, self).__init__() self.main_window = main_window self.channel_views = main_window.channel_views.views[:-1] self.ci.layout.setContentsMargins(0, 0, 0, 0) @@ -1155,7 +1376,12 @@ def __init__(self, main_window: MainWindow, *args, **kwargs): @qc.pyqtSlot(object) def on_draw(self, diff): - """Updates the pitch differences for each channel pair.""" + """Updates the pitch differences for each channel pair. + + Args: + diff: New pitch differences. + + """ if len(diff) > 0: for i in range(diff.shape[1]): self._lines[i][0].setData( diff --git a/pytch/gui_utils.py b/pytch/gui_utils.py index f14b73d..5871037 100644 --- a/pytch/gui_utils.py +++ b/pytch/gui_utils.py @@ -25,7 +25,7 @@ class FloatQLineEdit(qw.QLineEdit): accepted_value = qc.pyqtSignal(float) - def __init__(self, default=None, *args, **kwargs): + def __init__(self, default=None): """Initialization. Args:
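The differential view plots one trace per channel pair. Below is a minimal sketch of forming pairwise differences of cent trajectories with itertools.combinations; the trajectories and the pair ordering are illustrative assumptions, not values from pytch.

import itertools
import numpy as np

# hypothetical cent trajectories for three channels, shape (frames, channels)
f0_cents = np.array([[0.0, 100.0, 700.0],
                     [0.0, 110.0, 705.0]])

pairs = list(itertools.combinations(range(f0_cents.shape[1]), 2))
diff = np.stack([f0_cents[:, i] - f0_cents[:, j] for i, j in pairs], axis=1)

print(pairs)  # [(0, 1), (0, 2), (1, 2)]
print(diff)   # per-pair difference trajectories in cents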