Avramidis, Kleanthis; Stewart, Shanti; Narayanan, Shrikanth
On the Role of Visual Context in Enriching Music Representations Conference
IEEE International Conference on Acoustics, Speech, and Signal Processing (ICASSP), 2023.
@conference{avramidi-etal-vcmr,
title = {On the Role of Visual Context in Enriching Music Representations},
author = {Kleanthis Avramidis and Shanti Stewart and Shrikanth Narayanan},
url = {https://arxiv.org/abs/2210.15828},
year = {2023},
date = {2023-02-15},
urldate = {2023-02-15},
booktitle = {IEEE International Conference on Acoustics, Speech, and Signal Processing (ICASSP)},
abstract = {Human perception and experience of music are highly context-dependent. Contextual variability contributes to differences in how we interpret and interact with music, challenging the design of robust models for information retrieval. Incorporating multimodal context from diverse sources provides a promising approach toward modeling this variability. Music presented in media such as movies and music videos provides rich multimodal context that modulates underlying human experiences. However, such context modeling is underexplored, as it requires large amounts of multimodal data along with relevant annotations. Self-supervised learning can help address these challenges by automatically extracting rich, high-level correspondences between different modalities, hence alleviating the need for fine-grained annotations at scale. In this study, we propose VCMR -- Video-Conditioned Music Representations, a contrastive learning framework that learns music representations from audio and the accompanying music videos. The contextual visual information enhances representations of music audio, as evaluated on the downstream task of music tagging. Experimental results show that the proposed framework can contribute additive robustness to audio representations and indicate to what extent musical elements are affected or determined by visual context.},
keywords = {multimedia understanding, multimodal, music representations},
pubstate = {published},
tppubtype = {conference}
}
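The contrastive setup described in the VCMR abstract can be pictured with a short sketch: paired audio and video clip embeddings are projected into a shared space and aligned with a symmetric InfoNCE loss, so that matching pairs score higher than every other pair in the batch. This is a minimal illustration under assumed encoder dimensions, projection heads, and temperature; it is not the authors' implementation.

# Hypothetical sketch (not the authors' code) of a video-conditioned
# contrastive objective: paired audio/video clip embeddings are projected
# into a shared space and aligned with a symmetric InfoNCE (NT-Xent) loss.
import torch
import torch.nn as nn
import torch.nn.functional as F

class ProjectionHead(nn.Module):
    """Maps a modality-specific embedding into the shared contrastive space."""
    def __init__(self, in_dim: int, out_dim: int = 128):
        super().__init__()
        self.net = nn.Sequential(
            nn.Linear(in_dim, out_dim), nn.ReLU(), nn.Linear(out_dim, out_dim)
        )

    def forward(self, x: torch.Tensor) -> torch.Tensor:
        return F.normalize(self.net(x), dim=-1)  # unit-norm embeddings

def contrastive_loss(a: torch.Tensor, v: torch.Tensor, tau: float = 0.1):
    """Symmetric InfoNCE: matching (audio, video) pairs are positives;
    every other pair in the batch serves as a negative."""
    logits = a @ v.t() / tau                   # (B, B) similarity matrix
    targets = torch.arange(a.size(0))          # diagonal entries are positives
    return 0.5 * (F.cross_entropy(logits, targets)
                  + F.cross_entropy(logits.t(), targets))

# Usage with stand-ins for pooled clip features from pretrained encoders;
# the 512/768 dimensions are assumptions, not the paper's configuration.
audio_head, video_head = ProjectionHead(512), ProjectionHead(768)
loss = contrastive_loss(audio_head(torch.randn(8, 512)),
                        video_head(torch.randn(8, 768)))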
Greer, Timothy; Shi, Xuan; Ma, Benjamin; Narayanan, Shrikanth
Creating musical features using multi-faceted, multi-task encoders based on transformers Journal Article
In: Scientific Reports, 13(1), 10713, 2023.
@article{greer2023creating,
title = {Creating musical features using multi-faceted, multi-task encoders based on transformers},
author = {Timothy Greer and Xuan Shi and Benjamin Ma and Shrikanth Narayanan},
year = {2023},
date = {2023-01-01},
urldate = {2023-01-01},
journal = {Scientific Reports},
volume = {13},
number = {1},
pages = {10713},
publisher = {Nature Publishing Group},
abstract = {Computational machine intelligence approaches have enabled a variety of music-centric technologies in support of creating, sharing and interacting with music content. Strong performance on specific downstream application tasks, such as music genre detection and music emotion recognition, is paramount to ensuring broad capabilities for computational music understanding and Music Information Retrieval. Traditional approaches have relied on supervised learning to train models to support these music-related tasks. However, such approaches require copious annotated data and still may provide insight into only one view of music, namely the view tied to the specific task at hand. We present a new model for generating audio-musical features that support music understanding, leveraging self-supervision and cross-domain learning. After pre-training with masked reconstruction of musical input features using self-attention bidirectional transformers, the output representations are fine-tuned on several downstream music understanding tasks. Results show that the features generated by our multi-faceted, multi-task music transformer model, which we call M3BERT, tend to outperform other audio and music embeddings on several diverse music-related tasks, indicating the potential of self-supervised and semi-supervised learning approaches toward a more generalized and robust computational approach to modeling music. Our work can offer a starting point for many music-related modeling tasks, with potential applications in learning deep representations and enabling robust technology applications.},
keywords = {autoencoders, music representations, self-supervision},
pubstate = {published},
tppubtype = {article}
}
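The masked-reconstruction pre-training outlined in the M3BERT abstract can be sketched as follows: random frames of a musical feature sequence are replaced with a learned mask token, a bidirectional transformer encoder contextualizes the sequence, and a reconstruction head is trained to recover the hidden frames. The feature dimensions, layer counts, and mask ratio below are illustrative assumptions, not the paper's configuration.

# Hedged sketch of masked-reconstruction pre-training: hide random frames
# of a feature sequence and train a bidirectional transformer to rebuild them.
import torch
import torch.nn as nn
import torch.nn.functional as F

class MaskedFeatureEncoder(nn.Module):
    def __init__(self, feat_dim: int = 80, d_model: int = 256, n_layers: int = 4):
        super().__init__()
        self.in_proj = nn.Linear(feat_dim, d_model)
        self.mask_token = nn.Parameter(torch.zeros(d_model))  # learned mask embedding
        layer = nn.TransformerEncoderLayer(d_model, nhead=4, batch_first=True)
        self.encoder = nn.TransformerEncoder(layer, num_layers=n_layers)
        self.out_proj = nn.Linear(d_model, feat_dim)          # reconstruction head

    def forward(self, feats: torch.Tensor, mask_ratio: float = 0.15):
        x = self.in_proj(feats)                               # (B, T, d_model)
        mask = torch.rand(feats.shape[:2], device=feats.device) < mask_ratio
        x[mask] = self.mask_token                             # hide selected frames
        recon = self.out_proj(self.encoder(x))                # bidirectional context
        return recon, mask

# Usage: the reconstruction loss is computed only on the masked positions.
model = MaskedFeatureEncoder()
feats = torch.randn(2, 100, 80)               # e.g., log-mel spectrogram frames
recon, mask = model(feats)
loss = F.mse_loss(recon[mask], feats[mask])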