@misc{10481/93662,
  title        = {Deep Active Audio Feature Learning in Resource-Constrained Environments},
  author       = {Mohaimenuzzaman, Md and Bergmeir, Christoph Norbert and Meyer, Bernd},
  year         = {2024},
  month        = {7},
  publisher    = {IEEE},
  doi          = {10.48550/arXiv.2308.13201},
  url          = {https://hdl.handle.net/10481/93662},
  organization = {Australian Research Council under grant DE190100045},
  keywords     = {Deep Learning, Active Learning, Deep Active Learning},
  abstract     = {The scarcity of labelled data makes training Deep Neural Network (DNN) models in bioacoustic applications challenging. In typical bioacoustics applications, manually labelling the required amount of data can be prohibitively expensive. To effectively identify both new and existing classes, DNN models must continue to learn new features from a modest amount of fresh data. Active Learning (AL) is an approach that can support this learning while requiring little labelling effort. Nevertheless, the use of fixed feature extraction approaches limits feature quality, resulting in underutilization of the benefits of AL. We describe an AL framework that addresses this issue by incorporating feature extraction into the AL loop and refining the feature extractor after each round of manual annotation. In addition, we use raw audio processing rather than spectrograms, which is a novel approach. Experiments reveal that the proposed AL framework requires 14.3%, 66.7%, and 47.4% less labelling effort on the benchmark audio datasets ESC-50, UrbanSound8k, and InsectWingBeat, respectively, for a large DNN model, with similar savings for a microcontroller-based counterpart. Furthermore, we showcase the practical relevance of our study by incorporating data from conservation biology projects. All code is publicly available on GitHub.},
}