From c43b380e70136c65e4f2c0644fa50e1fb21b4fee Mon Sep 17 00:00:00 2001 From: Yoach Lacombe <52246514+ylacombe@users.noreply.github.com> Date: Mon, 18 Mar 2024 13:06:12 +0000 Subject: [PATCH] Add MusicGen Melody (#28819) * first modeling code * make repository * still WIP * update model * add tests * add latest change * clean docstrings and copied from * update docstrings md and readme * correct chroma function * correct copied from and remove unreleated test * add doc to toctree * correct imports * add convert script to notdoctested * Add suggestion from Sanchit Co-authored-by: Sanchit Gandhi <93869735+sanchit-gandhi@users.noreply.github.com> * correct get_uncoditional_inputs docstrings * modify README according to SANCHIT feedback * add chroma to audio utils * clean librosa and torchaudio hard dependencies * fix FE * refactor audio decoder -> audio encoder for consistency with previous musicgen * refactor conditional -> encoder * modify sampling rate logics * modify license at the beginning * refactor all_self_attns->all_attentions * remove ignore copy from causallm generate * add copied from for from_sub_models * fix make copies * add warning if audio is truncated * add copied from where relevant * remove artefact * fix convert script * fix torchaudio and FE * modify chroma method according to feedback-> better naming * refactor input_values->input_features * refactor input_values->input_features and fix import fe * add input_features to docstrigs * correct inputs_embeds logics * remove dtype conversion * refactor _prepare_conditional_hidden_states_kwargs_for_generation ->_prepare_encoder_hidden_states_kwargs_for_generation * change warning for chroma length * Update src/transformers/models/musicgen_melody/convert_musicgen_melody_transformers.py Co-authored-by: Sanchit Gandhi <93869735+sanchit-gandhi@users.noreply.github.com> * change way to save wav, using soundfile * correct docs and change to soundfile * fix import * fix init proj layers * remove line breaks from md * fix issue with docstrings * add FE suggestions * improve is in logics and remove useless imports * remove custom from_pretrained * simplify docstring code * add suggestions for modeling tests * make style * update converting script with sanity check * remove encoder attention mask from conditional generation * replace musicgen melody checkpoints with official orga * rename ylacombe->facebook in checkpoints * fix copies * remove unecessary warning * add shape in code docstrings * add files to slow doc tests * fix md bug and add md to not_tested * make fix-copies * fix hidden states test and batching --------- Co-authored-by: Sanchit Gandhi <93869735+sanchit-gandhi@users.noreply.github.com> --- README.md | 1 + README_de.md | 1 + README_es.md | 1 + README_fr.md | 1 + README_hd.md | 1 + README_ja.md | 1 + README_ko.md | 1 + README_pt-br.md | 1 + README_ru.md | 1 + README_te.md | 1 + README_vi.md | 1 + README_zh-hans.md | 1 + README_zh-hant.md | 1 + docs/source/en/_toctree.yml | 2 + docs/source/en/index.md | 1 + docs/source/en/model_doc/musicgen_melody.md | 288 ++ docs/source/en/tasks/language_modeling.md | 2 +- src/transformers/__init__.py | 49 + src/transformers/audio_utils.py | 100 +- src/transformers/models/__init__.py | 1 + .../models/auto/configuration_auto.py | 3 + src/transformers/models/auto/modeling_auto.py | 2 + .../models/auto/tokenization_auto.py | 1 + .../models/musicgen_melody/__init__.py | 90 + .../configuration_musicgen_melody.py | 256 ++ .../convert_musicgen_melody_transformers.py | 266 ++ 
.../feature_extraction_musicgen_melody.py | 330 +++ .../modeling_musicgen_melody.py | 2398 +++++++++++++++++ .../processing_musicgen_melody.py | 174 ++ src/transformers/utils/dummy_pt_objects.py | 31 + .../utils/dummy_torchaudio_objects.py | 16 + tests/models/musicgen_melody/__init__.py | 0 ...test_feature_extraction_musicgen_melody.py | 231 ++ .../test_modeling_musicgen_melody.py | 1449 ++++++++++ .../test_processor_musicgen_melody.py | 179 ++ tests/utils/test_audio_utils.py | 60 + utils/check_docstrings.py | 1 + utils/check_repo.py | 1 + utils/not_doctested.txt | 2 + utils/slow_documentation_tests.txt | 4 +- 40 files changed, 5947 insertions(+), 3 deletions(-) create mode 100644 docs/source/en/model_doc/musicgen_melody.md create mode 100644 src/transformers/models/musicgen_melody/__init__.py create mode 100644 src/transformers/models/musicgen_melody/configuration_musicgen_melody.py create mode 100644 src/transformers/models/musicgen_melody/convert_musicgen_melody_transformers.py create mode 100644 src/transformers/models/musicgen_melody/feature_extraction_musicgen_melody.py create mode 100644 src/transformers/models/musicgen_melody/modeling_musicgen_melody.py create mode 100644 src/transformers/models/musicgen_melody/processing_musicgen_melody.py create mode 100644 src/transformers/utils/dummy_torchaudio_objects.py create mode 100644 tests/models/musicgen_melody/__init__.py create mode 100644 tests/models/musicgen_melody/test_feature_extraction_musicgen_melody.py create mode 100644 tests/models/musicgen_melody/test_modeling_musicgen_melody.py create mode 100644 tests/models/musicgen_melody/test_processor_musicgen_melody.py diff --git a/README.md b/README.md index fb3792d16b7f7a..fdbd59883dd541 100644 --- a/README.md +++ b/README.md @@ -442,6 +442,7 @@ Current number of checkpoints: ![](https://img.shields.io/endpoint?url=https://h 1. **[MRA](https://huggingface.co/docs/transformers/model_doc/mra)** (from the University of Wisconsin - Madison) released with the paper [Multi Resolution Analysis (MRA) for Approximate Self-Attention](https://arxiv.org/abs/2207.10284) by Zhanpeng Zeng, Sourav Pal, Jeffery Kline, Glenn M Fung, Vikas Singh. 1. **[MT5](https://huggingface.co/docs/transformers/model_doc/mt5)** (from Google AI) released with the paper [mT5: A massively multilingual pre-trained text-to-text transformer](https://arxiv.org/abs/2010.11934) by Linting Xue, Noah Constant, Adam Roberts, Mihir Kale, Rami Al-Rfou, Aditya Siddhant, Aditya Barua, Colin Raffel. 1. **[MusicGen](https://huggingface.co/docs/transformers/model_doc/musicgen)** (from Meta) released with the paper [Simple and Controllable Music Generation](https://arxiv.org/abs/2306.05284) by Jade Copet, Felix Kreuk, Itai Gat, Tal Remez, David Kant, Gabriel Synnaeve, Yossi Adi and Alexandre Défossez. +1. **[MusicGen Melody](https://huggingface.co/docs/transformers/main/model_doc/musicgen_melody)** (from Meta) released with the paper [Simple and Controllable Music Generation](https://arxiv.org/abs/2306.05284) by Jade Copet, Felix Kreuk, Itai Gat, Tal Remez, David Kant, Gabriel Synnaeve, Yossi Adi and Alexandre Défossez. 1. **[MVP](https://huggingface.co/docs/transformers/model_doc/mvp)** (from RUC AI Box) released with the paper [MVP: Multi-task Supervised Pre-training for Natural Language Generation](https://arxiv.org/abs/2206.12131) by Tianyi Tang, Junyi Li, Wayne Xin Zhao and Ji-Rong Wen. 1. 
**[NAT](https://huggingface.co/docs/transformers/model_doc/nat)** (from SHI Labs) released with the paper [Neighborhood Attention Transformer](https://arxiv.org/abs/2204.07143) by Ali Hassani, Steven Walton, Jiachen Li, Shen Li, and Humphrey Shi. 1. **[Nezha](https://huggingface.co/docs/transformers/model_doc/nezha)** (from Huawei Noah’s Ark Lab) released with the paper [NEZHA: Neural Contextualized Representation for Chinese Language Understanding](https://arxiv.org/abs/1909.00204) by Junqiu Wei, Xiaozhe Ren, Xiaoguang Li, Wenyong Huang, Yi Liao, Yasheng Wang, Jiashu Lin, Xin Jiang, Xiao Chen and Qun Liu. diff --git a/README_de.md b/README_de.md index f00338c1939b05..a1a016767194d3 100644 --- a/README_de.md +++ b/README_de.md @@ -438,6 +438,7 @@ Aktuelle Anzahl der Checkpoints: ![](https://img.shields.io/endpoint?url=https:/ 1. **[MRA](https://huggingface.co/docs/transformers/model_doc/mra)** (from the University of Wisconsin - Madison) released with the paper [Multi Resolution Analysis (MRA) for Approximate Self-Attention](https://arxiv.org/abs/2207.10284) by Zhanpeng Zeng, Sourav Pal, Jeffery Kline, Glenn M Fung, Vikas Singh. 1. **[MT5](https://huggingface.co/docs/transformers/model_doc/mt5)** (from Google AI) released with the paper [mT5: A massively multilingual pre-trained text-to-text transformer](https://arxiv.org/abs/2010.11934) by Linting Xue, Noah Constant, Adam Roberts, Mihir Kale, Rami Al-Rfou, Aditya Siddhant, Aditya Barua, Colin Raffel. 1. **[MusicGen](https://huggingface.co/docs/transformers/model_doc/musicgen)** (from Meta) released with the paper [Simple and Controllable Music Generation](https://arxiv.org/abs/2306.05284) by Jade Copet, Felix Kreuk, Itai Gat, Tal Remez, David Kant, Gabriel Synnaeve, Yossi Adi and Alexandre Défossez. +1. **[MusicGen Melody](https://huggingface.co/docs/transformers/main/model_doc/musicgen_melody)** (from Meta) released with the paper [Simple and Controllable Music Generation](https://arxiv.org/abs/2306.05284) by Jade Copet, Felix Kreuk, Itai Gat, Tal Remez, David Kant, Gabriel Synnaeve, Yossi Adi and Alexandre Défossez. 1. **[MVP](https://huggingface.co/docs/transformers/model_doc/mvp)** (from RUC AI Box) released with the paper [MVP: Multi-task Supervised Pre-training for Natural Language Generation](https://arxiv.org/abs/2206.12131) by Tianyi Tang, Junyi Li, Wayne Xin Zhao and Ji-Rong Wen. 1. **[NAT](https://huggingface.co/docs/transformers/model_doc/nat)** (from SHI Labs) released with the paper [Neighborhood Attention Transformer](https://arxiv.org/abs/2204.07143) by Ali Hassani, Steven Walton, Jiachen Li, Shen Li, and Humphrey Shi. 1. **[Nezha](https://huggingface.co/docs/transformers/model_doc/nezha)** (from Huawei Noah’s Ark Lab) released with the paper [NEZHA: Neural Contextualized Representation for Chinese Language Understanding](https://arxiv.org/abs/1909.00204) by Junqiu Wei, Xiaozhe Ren, Xiaoguang Li, Wenyong Huang, Yi Liao, Yasheng Wang, Jiashu Lin, Xin Jiang, Xiao Chen and Qun Liu. diff --git a/README_es.md b/README_es.md index 9c0bf941f8bca2..61a5f284b88e23 100644 --- a/README_es.md +++ b/README_es.md @@ -415,6 +415,7 @@ Número actual de puntos de control: ![](https://img.shields.io/endpoint?url=htt 1. **[MRA](https://huggingface.co/docs/transformers/model_doc/mra)** (from the University of Wisconsin - Madison) released with the paper [Multi Resolution Analysis (MRA)](https://arxiv.org/abs/2207.10284) by Zhanpeng Zeng, Sourav Pal, Jeffery Kline, Glenn M Fung, Vikas Singh. 1. 
**[MT5](https://huggingface.co/docs/transformers/model_doc/mt5)** (from Google AI) released with the paper [mT5: A massively multilingual pre-trained text-to-text transformer](https://arxiv.org/abs/2010.11934) by Linting Xue, Noah Constant, Adam Roberts, Mihir Kale, Rami Al-Rfou, Aditya Siddhant, Aditya Barua, Colin Raffel. 1. **[MusicGen](https://huggingface.co/docs/transformers/model_doc/musicgen)** (from Meta) released with the paper [Simple and Controllable Music Generation](https://arxiv.org/abs/2306.05284) by Jade Copet, Felix Kreuk, Itai Gat, Tal Remez, David Kant, Gabriel Synnaeve, Yossi Adi and Alexandre Défossez. +1. **[MusicGen Melody](https://huggingface.co/docs/transformers/main/model_doc/musicgen_melody)** (from Meta) released with the paper [Simple and Controllable Music Generation](https://arxiv.org/abs/2306.05284) by Jade Copet, Felix Kreuk, Itai Gat, Tal Remez, David Kant, Gabriel Synnaeve, Yossi Adi and Alexandre Défossez. 1. **[MVP](https://huggingface.co/docs/transformers/model_doc/mvp)** (from RUC AI Box) released with the paper [MVP: Multi-task Supervised Pre-training for Natural Language Generation](https://arxiv.org/abs/2206.12131) by Tianyi Tang, Junyi Li, Wayne Xin Zhao and Ji-Rong Wen. 1. **[NAT](https://huggingface.co/docs/transformers/model_doc/nat)** (from SHI Labs) released with the paper [Neighborhood Attention Transformer](https://arxiv.org/abs/2204.07143) by Ali Hassani, Steven Walton, Jiachen Li, Shen Li, and Humphrey Shi. 1. **[Nezha](https://huggingface.co/docs/transformers/model_doc/nezha)** (from Huawei Noah’s Ark Lab) released with the paper [NEZHA: Neural Contextualized Representation for Chinese Language Understanding](https://arxiv.org/abs/1909.00204) by Junqiu Wei, Xiaozhe Ren, Xiaoguang Li, Wenyong Huang, Yi Liao, Yasheng Wang, Jiashu Lin, Xin Jiang, Xiao Chen and Qun Liu. diff --git a/README_fr.md b/README_fr.md index 02dde90bf57389..ab0a790c284dfb 100644 --- a/README_fr.md +++ b/README_fr.md @@ -436,6 +436,7 @@ Nombre actuel de points de contrôle : ![](https://img.shields.io/endpoint?url=h 1. **[MRA](https://huggingface.co/docs/transformers/model_doc/mra)** (de l'Université du Wisconsin - Madison) a été publié dans l'article [Analyse multi-résolution (MRA) pour une auto-attention approximative](https://arxiv.org/abs/2207.10284) par Zhanpeng Zeng, Sourav Pal, Jeffery Kline, Glenn M Fung, Vikas Singh. 1. **[MT5](https://huggingface.co/docs/transformers/model_doc/mt5)** (de Google AI) a été publié dans l'article [mT5 : un transformateur texte-à-texte pré-entraîné massivement multilingue](https://arxiv.org/abs/2010.11934) par Linting Xue, Noah Constant, Adam Roberts, Mihir Kale, Rami Al-Rfou, Aditya Siddhant, Aditya Barua, Colin Raffel. 1. **[MusicGen](https://huggingface.co/docs/transformers/model_doc/musicgen)** (de Meta) a été publié dans l'article [Génération de musique simple et contrôlable](https://arxiv.org/abs/2306.05284) par Jade Copet, Felix Kreuk, Itai Gat, Tal Remez, David Kant, Gabriel Synnaeve, Yossi Adi et Alexandre Défossez. +1. **[MusicGen Melody](https://huggingface.co/docs/transformers/main/model_doc/musicgen_melody)** (de Meta) publié dans l'article [Simple and Controllable Music Generation](https://arxiv.org/abs/2306.05284) parJade Copet, Felix Kreuk, Itai Gat, Tal Remez, David Kant, Gabriel Synnaeve, Yossi Adi and Alexandre Défossez. 1. 
**[MVP](https://huggingface.co/docs/transformers/model_doc/mvp)** (de RUC AI Box) a été publié dans l'article [MVP : Pré-entraînement supervisé multi-tâche pour la génération de langage naturel](https://arxiv.org/abs/2206.12131) par Tianyi Tang, Junyi Li, Wayne Xin Zhao et Ji-Rong Wen. 1. **[NAT](https://huggingface.co/docs/transformers/model_doc/nat)** (de SHI Labs) a été publié dans l'article [Transformateur d'attention de voisinage](https://arxiv.org/abs/2204.07143) par Ali Hassani, Steven Walton, Jiachen Li, Shen Li et Humphrey Shi. 1. **[Nezha](https://huggingface.co/docs/transformers/model_doc/nezha)** (du laboratoire Noah's Ark de Huawei) a été publié dans l'article [NEZHA : Représentation contextualisée neurale pour la compréhension du langage chinois](https://arxiv.org/abs/1909.00204) par Junqiu Wei, Xiaozhe Ren, Xiaoguang Li, Wenyong Huang, Yi Liao, Yasheng Wang, Jiashu Lin, Xin Jiang, Xiao Chen et Qun Liu. diff --git a/README_hd.md b/README_hd.md index 61807ab226aec1..4c1b6a419e448c 100644 --- a/README_hd.md +++ b/README_hd.md @@ -389,6 +389,7 @@ conda install conda-forge::transformers 1. **[MRA](https://huggingface.co/docs/transformers/model_doc/mra)** (the University of Wisconsin - Madison से) Zhanpeng Zeng, Sourav Pal, Jeffery Kline, Glenn M Fung, Vikas Singh. द्वाराअनुसंधान पत्र [Multi Resolution Analysis (MRA)](https://arxiv.org/abs/2207.10284) के साथ जारी किया गया 1. **[MT5](https://huggingface.co/docs/transformers/model_doc/mt5)** (Google AI से) साथ वाला पेपर [mT5: एक व्यापक बहुभाषी पूर्व-प्रशिक्षित टेक्स्ट-टू-टेक्स्ट ट्रांसफॉर्मर](https://arxiv.org/abs/2010.11934) लिंटिंग ज़ू, नोआ कॉन्सटेंट, एडम रॉबर्ट्स, मिहिर काले, रामी अल-रफू, आदित्य सिद्धांत, आदित्य बरुआ, कॉलिन रैफेल द्वारा पोस्ट किया गया। 1. **[MusicGen](https://huggingface.co/docs/transformers/model_doc/musicgen)** (from Meta) released with the paper [Simple and Controllable Music Generation](https://arxiv.org/abs/2306.05284) by Jade Copet, Felix Kreuk, Itai Gat, Tal Remez, David Kant, Gabriel Synnaeve, Yossi Adi and Alexandre Défossez. +1. **[MusicGen Melody](https://huggingface.co/docs/transformers/main/model_doc/musicgen_melody)** (from Meta) released with the paper [Simple and Controllable Music Generation](https://arxiv.org/abs/2306.05284) by Jade Copet, Felix Kreuk, Itai Gat, Tal Remez, David Kant, Gabriel Synnaeve, Yossi Adi and Alexandre Défossez. 1. **[MVP](https://huggingface.co/docs/transformers/model_doc/mvp)** (from RUC AI Box) released with the paper [MVP: Multi-task Supervised Pre-training for Natural Language Generation](https://arxiv.org/abs/2206.12131) by Tianyi Tang, Junyi Li, Wayne Xin Zhao and Ji-Rong Wen. 1. **[NAT](https://huggingface.co/docs/transformers/model_doc/nat)** (from SHI Labs) released with the paper [Neighborhood Attention Transformer](https://arxiv.org/abs/2204.07143) by Ali Hassani, Steven Walton, Jiachen Li, Shen Li, and Humphrey Shi. 1. **[Nezha](https://huggingface.co/docs/transformers/model_doc/nezha)** (हुआवेई नूह के आर्क लैब से) साथ में कागज़ [NEZHA: चीनी भाषा समझ के लिए तंत्रिका प्रासंगिक प्रतिनिधित्व](https://arxiv.org/abs/1909.00204) जुन्किउ वेई, ज़ियाओज़े रेन, ज़िआओगुआंग ली, वेनयोंग हुआंग, यी लियाओ, याशेंग वांग, जियाशू लिन, शिन जियांग, जिओ चेन और कुन लियू द्वारा। diff --git a/README_ja.md b/README_ja.md index 6b224b3b6c18f4..7efc8cd0570637 100644 --- a/README_ja.md +++ b/README_ja.md @@ -449,6 +449,7 @@ Flax、PyTorch、TensorFlowをcondaでインストールする方法は、それ 1. 
**[MRA](https://huggingface.co/docs/transformers/model_doc/mra)** (the University of Wisconsin - Madison から) Zhanpeng Zeng, Sourav Pal, Jeffery Kline, Glenn M Fung, Vikas Singh. から公開された研究論文 [Multi Resolution Analysis (MRA)](https://arxiv.org/abs/2207.10284) 1. **[MT5](https://huggingface.co/docs/transformers/model_doc/mt5)** (Google AI から) Linting Xue, Noah Constant, Adam Roberts, Mihir Kale, Rami Al-Rfou, Aditya Siddhant, Aditya Barua, Colin Raffel から公開された研究論文: [mT5: A massively multilingual pre-trained text-to-text transformer](https://arxiv.org/abs/2010.11934) 1. **[MusicGen](https://huggingface.co/docs/transformers/model_doc/musicgen)** (from Meta) released with the paper [Simple and Controllable Music Generation](https://arxiv.org/abs/2306.05284) by Jade Copet, Felix Kreuk, Itai Gat, Tal Remez, David Kant, Gabriel Synnaeve, Yossi Adi and Alexandre Défossez. +1. **[MusicGen Melody](https://huggingface.co/docs/transformers/main/model_doc/musicgen_melody)** (from Meta) released with the paper [Simple and Controllable Music Generation](https://arxiv.org/abs/2306.05284) by Jade Copet, Felix Kreuk, Itai Gat, Tal Remez, David Kant, Gabriel Synnaeve, Yossi Adi and Alexandre Défossez. 1. **[MVP](https://huggingface.co/docs/transformers/model_doc/mvp)** (RUC AI Box から) Tianyi Tang, Junyi Li, Wayne Xin Zhao and Ji-Rong Wen から公開された研究論文: [MVP: Multi-task Supervised Pre-training for Natural Language Generation](https://arxiv.org/abs/2206.12131) 1. **[NAT](https://huggingface.co/docs/transformers/model_doc/nat)** (SHI Labs から) Ali Hassani, Steven Walton, Jiachen Li, Shen Li, and Humphrey Shi から公開された研究論文: [Neighborhood Attention Transformer](https://arxiv.org/abs/2204.07143) 1. **[Nezha](https://huggingface.co/docs/transformers/model_doc/nezha)** (Huawei Noah’s Ark Lab から) Junqiu Wei, Xiaozhe Ren, Xiaoguang Li, Wenyong Huang, Yi Liao, Yasheng Wang, Jiashu Lin, Xin Jiang, Xiao Chen and Qun Liu から公開された研究論文: [NEZHA: Neural Contextualized Representation for Chinese Language Understanding](https://arxiv.org/abs/1909.00204) diff --git a/README_ko.md b/README_ko.md index 36dadecf3a248d..9004123d880cc0 100644 --- a/README_ko.md +++ b/README_ko.md @@ -364,6 +364,7 @@ Flax, PyTorch, TensorFlow 설치 페이지에서 이들을 conda로 설치하는 1. **[MRA](https://huggingface.co/docs/transformers/model_doc/mra)** (the University of Wisconsin - Madison 에서 제공)은 Zhanpeng Zeng, Sourav Pal, Jeffery Kline, Glenn M Fung, Vikas Singh.의 [Multi Resolution Analysis (MRA)](https://arxiv.org/abs/2207.10284) 논문과 함께 발표했습니다. 1. **[MT5](https://huggingface.co/docs/transformers/model_doc/mt5)** (Google AI 에서) Linting Xue, Noah Constant, Adam Roberts, Mihir Kale, Rami Al-Rfou, Aditya Siddhant, Aditya Barua, Colin Raffel 의 [mT5: A massively multilingual pre-trained text-to-text transformer](https://arxiv.org/abs/2010.11934) 논문과 함께 발표했습니다. 1. **[MusicGen](https://huggingface.co/docs/transformers/model_doc/musicgen)** (from Meta) released with the paper [Simple and Controllable Music Generation](https://arxiv.org/abs/2306.05284) by Jade Copet, Felix Kreuk, Itai Gat, Tal Remez, David Kant, Gabriel Synnaeve, Yossi Adi and Alexandre Défossez. +1. **[MusicGen Melody](https://huggingface.co/docs/transformers/main/model_doc/musicgen_melody)** (from Meta) released with the paper [Simple and Controllable Music Generation](https://arxiv.org/abs/2306.05284) by Jade Copet, Felix Kreuk, Itai Gat, Tal Remez, David Kant, Gabriel Synnaeve, Yossi Adi and Alexandre Défossez. 1. 
**[MVP](https://huggingface.co/docs/transformers/model_doc/mvp)** (RUC AI Box 에서) Tianyi Tang, Junyi Li, Wayne Xin Zhao and Ji-Rong Wen 의 [MVP: Multi-task Supervised Pre-training for Natural Language Generation](https://arxiv.org/abs/2206.12131) 논문과 함께 발표했습니다. 1. **[NAT](https://huggingface.co/docs/transformers/model_doc/nat)** (SHI Labs 에서) Ali Hassani, Steven Walton, Jiachen Li, Shen Li, and Humphrey Shi 의 [Neighborhood Attention Transformer](https://arxiv.org/abs/2204.07143) 논문과 함께 발표했습니다. 1. **[Nezha](https://huggingface.co/docs/transformers/model_doc/nezha)** (Huawei Noah’s Ark Lab 에서) Junqiu Wei, Xiaozhe Ren, Xiaoguang Li, Wenyong Huang, Yi Liao, Yasheng Wang, Jiashu Lin, Xin Jiang, Xiao Chen and Qun Liu 의 [NEZHA: Neural Contextualized Representation for Chinese Language Understanding](https://arxiv.org/abs/1909.00204) 논문과 함께 발표했습니다. diff --git a/README_pt-br.md b/README_pt-br.md index 0066a8850145fe..ef4c9b201b5f62 100644 --- a/README_pt-br.md +++ b/README_pt-br.md @@ -447,6 +447,7 @@ Número atual de pontos de verificação: ![](https://img.shields.io/endpoint?ur 1. **[MRA](https://huggingface.co/docs/transformers/model_doc/mra)** (from the University of Wisconsin - Madison) released with the paper [Multi Resolution Analysis (MRA) for Approximate Self-Attention](https://arxiv.org/abs/2207.10284) by Zhanpeng Zeng, Sourav Pal, Jeffery Kline, Glenn M Fung, Vikas Singh. 1. **[MT5](https://huggingface.co/docs/transformers/model_doc/mt5)** (from Google AI) released with the paper [mT5: A massively multilingual pre-trained text-to-text transformer](https://arxiv.org/abs/2010.11934) by Linting Xue, Noah Constant, Adam Roberts, Mihir Kale, Rami Al-Rfou, Aditya Siddhant, Aditya Barua, Colin Raffel. 1. **[MusicGen](https://huggingface.co/docs/transformers/model_doc/musicgen)** (from Meta) released with the paper [Simple and Controllable Music Generation](https://arxiv.org/abs/2306.05284) by Jade Copet, Felix Kreuk, Itai Gat, Tal Remez, David Kant, Gabriel Synnaeve, Yossi Adi and Alexandre Défossez. +1. **[MusicGen Melody](https://huggingface.co/docs/transformers/main/model_doc/musicgen_melody)** (from Meta) released with the paper [Simple and Controllable Music Generation](https://arxiv.org/abs/2306.05284) by Jade Copet, Felix Kreuk, Itai Gat, Tal Remez, David Kant, Gabriel Synnaeve, Yossi Adi and Alexandre Défossez. 1. **[MVP](https://huggingface.co/docs/transformers/model_doc/mvp)** (from RUC AI Box) released with the paper [MVP: Multi-task Supervised Pre-training for Natural Language Generation](https://arxiv.org/abs/2206.12131) by Tianyi Tang, Junyi Li, Wayne Xin Zhao and Ji-Rong Wen. 1. **[NAT](https://huggingface.co/docs/transformers/model_doc/nat)** (from SHI Labs) released with the paper [Neighborhood Attention Transformer](https://arxiv.org/abs/2204.07143) by Ali Hassani, Steven Walton, Jiachen Li, Shen Li, and Humphrey Shi. 1. **[Nezha](https://huggingface.co/docs/transformers/model_doc/nezha)** (from Huawei Noah’s Ark Lab) released with the paper [NEZHA: Neural Contextualized Representation for Chinese Language Understanding](https://arxiv.org/abs/1909.00204) by Junqiu Wei, Xiaozhe Ren, Xiaoguang Li, Wenyong Huang, Yi Liao, Yasheng Wang, Jiashu Lin, Xin Jiang, Xiao Chen and Qun Liu. diff --git a/README_ru.md b/README_ru.md index 9a486bc7f5d9e2..8139fa51b994a2 100644 --- a/README_ru.md +++ b/README_ru.md @@ -437,6 +437,7 @@ conda install conda-forge::transformers 1. 
**[MRA](https://huggingface.co/docs/transformers/model_doc/mra)** (from the University of Wisconsin - Madison) released with the paper [Multi Resolution Analysis (MRA) for Approximate Self-Attention](https://arxiv.org/abs/2207.10284) by Zhanpeng Zeng, Sourav Pal, Jeffery Kline, Glenn M Fung, Vikas Singh. 1. **[MT5](https://huggingface.co/docs/transformers/model_doc/mt5)** (from Google AI) released with the paper [mT5: A massively multilingual pre-trained text-to-text transformer](https://arxiv.org/abs/2010.11934) by Linting Xue, Noah Constant, Adam Roberts, Mihir Kale, Rami Al-Rfou, Aditya Siddhant, Aditya Barua, Colin Raffel. 1. **[MusicGen](https://huggingface.co/docs/transformers/model_doc/musicgen)** (from Meta) released with the paper [Simple and Controllable Music Generation](https://arxiv.org/abs/2306.05284) by Jade Copet, Felix Kreuk, Itai Gat, Tal Remez, David Kant, Gabriel Synnaeve, Yossi Adi and Alexandre Défossez. +1. **[MusicGen Melody](https://huggingface.co/docs/transformers/main/model_doc/musicgen_melody)** (from Meta) released with the paper [Simple and Controllable Music Generation](https://arxiv.org/abs/2306.05284) by Jade Copet, Felix Kreuk, Itai Gat, Tal Remez, David Kant, Gabriel Synnaeve, Yossi Adi and Alexandre Défossez. 1. **[MVP](https://huggingface.co/docs/transformers/model_doc/mvp)** (from RUC AI Box) released with the paper [MVP: Multi-task Supervised Pre-training for Natural Language Generation](https://arxiv.org/abs/2206.12131) by Tianyi Tang, Junyi Li, Wayne Xin Zhao and Ji-Rong Wen. 1. **[NAT](https://huggingface.co/docs/transformers/model_doc/nat)** (from SHI Labs) released with the paper [Neighborhood Attention Transformer](https://arxiv.org/abs/2204.07143) by Ali Hassani, Steven Walton, Jiachen Li, Shen Li, and Humphrey Shi. 1. **[Nezha](https://huggingface.co/docs/transformers/model_doc/nezha)** (from Huawei Noah’s Ark Lab) released with the paper [NEZHA: Neural Contextualized Representation for Chinese Language Understanding](https://arxiv.org/abs/1909.00204) by Junqiu Wei, Xiaozhe Ren, Xiaoguang Li, Wenyong Huang, Yi Liao, Yasheng Wang, Jiashu Lin, Xin Jiang, Xiao Chen and Qun Liu. diff --git a/README_te.md b/README_te.md index 522539713c5668..00acdccd28c6fd 100644 --- a/README_te.md +++ b/README_te.md @@ -439,6 +439,7 @@ Flax, PyTorch లేదా TensorFlow యొక్క ఇన్‌స్టా 1. **[MRA](https://huggingface.co/docs/transformers/model_doc/mra)** (from the University of Wisconsin - Madison) released with the paper [Multi Resolution Analysis (MRA) for Approximate Self-Attention](https://arxiv.org/abs/2207.10284) by Zhanpeng Zeng, Sourav Pal, Jeffery Kline, Glenn M Fung, Vikas Singh. 1. **[MT5](https://huggingface.co/docs/transformers/model_doc/mt5)** (from Google AI) released with the paper [mT5: A massively multilingual pre-trained text-to-text transformer](https://arxiv.org/abs/2010.11934) by Linting Xue, Noah Constant, Adam Roberts, Mihir Kale, Rami Al-Rfou, Aditya Siddhant, Aditya Barua, Colin Raffel. 1. **[MusicGen](https://huggingface.co/docs/transformers/model_doc/musicgen)** (from Meta) released with the paper [Simple and Controllable Music Generation](https://arxiv.org/abs/2306.05284) by Jade Copet, Felix Kreuk, Itai Gat, Tal Remez, David Kant, Gabriel Synnaeve, Yossi Adi and Alexandre Défossez. +1. 
**[MusicGen Melody](https://huggingface.co/docs/transformers/main/model_doc/musicgen_melody)** (from Meta) released with the paper [Simple and Controllable Music Generation](https://arxiv.org/abs/2306.05284) by Jade Copet, Felix Kreuk, Itai Gat, Tal Remez, David Kant, Gabriel Synnaeve, Yossi Adi and Alexandre Défossez. 1. **[MVP](https://huggingface.co/docs/transformers/model_doc/mvp)** (from RUC AI Box) released with the paper [MVP: Multi-task Supervised Pre-training for Natural Language Generation](https://arxiv.org/abs/2206.12131) by Tianyi Tang, Junyi Li, Wayne Xin Zhao and Ji-Rong Wen. 1. **[NAT](https://huggingface.co/docs/transformers/model_doc/nat)** (from SHI Labs) released with the paper [Neighborhood Attention Transformer](https://arxiv.org/abs/2204.07143) by Ali Hassani, Steven Walton, Jiachen Li, Shen Li, and Humphrey Shi. 1. **[Nezha](https://huggingface.co/docs/transformers/model_doc/nezha)** (from Huawei Noah’s Ark Lab) released with the paper [NEZHA: Neural Contextualized Representation for Chinese Language Understanding](https://arxiv.org/abs/1909.00204) by Junqiu Wei, Xiaozhe Ren, Xiaoguang Li, Wenyong Huang, Yi Liao, Yasheng Wang, Jiashu Lin, Xin Jiang, Xiao Chen and Qun Liu. diff --git a/README_vi.md b/README_vi.md index 293ce00ad79361..3a0a2f09db0227 100644 --- a/README_vi.md +++ b/README_vi.md @@ -438,6 +438,7 @@ Số lượng điểm kiểm tra hiện tại: ![](https://img.shields.io/endpoi 1. **[MRA](https://huggingface.co/docs/transformers/model_doc/mra)** (từ the University of Wisconsin - Madison) được phát hành với bài báo [Multi Resolution Analysis (MRA) for Approximate Self-Attention](https://arxiv.org/abs/2207.10284) by Zhanpeng Zeng, Sourav Pal, Jeffery Kline, Glenn M Fung, Vikas Singh. 1. **[MT5](https://huggingface.co/docs/transformers/model_doc/mt5)** (từ Google AI) được phát hành với bài báo [mT5: A massively multilingual pre-trained text-to-text transformer](https://arxiv.org/abs/2010.11934) by Linting Xue, Noah Constant, Adam Roberts, Mihir Kale, Rami Al-Rfou, Aditya Siddhant, Aditya Barua, Colin Raffel. 1. **[MusicGen](https://huggingface.co/docs/transformers/model_doc/musicgen)** (từ Meta) được phát hành với bài báo [Simple and Controllable Music Generation](https://arxiv.org/abs/2306.05284) by Jade Copet, Felix Kreuk, Itai Gat, Tal Remez, David Kant, Gabriel Synnaeve, Yossi Adi and Alexandre Défossez. +1. **[MusicGen Melody](https://huggingface.co/docs/transformers/main/model_doc/musicgen_melody)** (từ Meta) được phát hành với bài báo [Simple and Controllable Music Generation](https://arxiv.org/abs/2306.05284) by Jade Copet, Felix Kreuk, Itai Gat, Tal Remez, David Kant, Gabriel Synnaeve, Yossi Adi and Alexandre Défossez. 1. **[MVP](https://huggingface.co/docs/transformers/model_doc/mvp)** (từ RUC AI Box) được phát hành với bài báo [MVP: Multi-task Supervised Pre-training for Natural Language Generation](https://arxiv.org/abs/2206.12131) by Tianyi Tang, Junyi Li, Wayne Xin Zhao and Ji-Rong Wen. 1. **[NAT](https://huggingface.co/docs/transformers/model_doc/nat)** (từ SHI Labs) được phát hành với bài báo [Neighborhood Attention Transformer](https://arxiv.org/abs/2204.07143) by Ali Hassani, Steven Walton, Jiachen Li, Shen Li, and Humphrey Shi. 1. 
**[Nezha](https://huggingface.co/docs/transformers/model_doc/nezha)** (từ Huawei Noah’s Ark Lab) được phát hành với bài báo [NEZHA: Neural Contextualized Representation for Chinese Language Understanding](https://arxiv.org/abs/1909.00204) by Junqiu Wei, Xiaozhe Ren, Xiaoguang Li, Wenyong Huang, Yi Liao, Yasheng Wang, Jiashu Lin, Xin Jiang, Xiao Chen and Qun Liu. diff --git a/README_zh-hans.md b/README_zh-hans.md index 198e393b89105c..52a244bd8aa3eb 100644 --- a/README_zh-hans.md +++ b/README_zh-hans.md @@ -388,6 +388,7 @@ conda install conda-forge::transformers 1. **[MRA](https://huggingface.co/docs/transformers/model_doc/mra)** (来自 the University of Wisconsin - Madison) 伴随论文 [Multi Resolution Analysis (MRA)](https://arxiv.org/abs/2207.10284) 由 Zhanpeng Zeng, Sourav Pal, Jeffery Kline, Glenn M Fung, Vikas Singh 发布。 1. **[MT5](https://huggingface.co/docs/transformers/model_doc/mt5)** (来自 Google AI) 伴随论文 [mT5: A massively multilingual pre-trained text-to-text transformer](https://arxiv.org/abs/2010.11934) 由 Linting Xue, Noah Constant, Adam Roberts, Mihir Kale, Rami Al-Rfou, Aditya Siddhant, Aditya Barua, Colin Raffel 发布。 1. **[MusicGen](https://huggingface.co/docs/transformers/model_doc/musicgen)** (from Meta) released with the paper [Simple and Controllable Music Generation](https://arxiv.org/abs/2306.05284) by Jade Copet, Felix Kreuk, Itai Gat, Tal Remez, David Kant, Gabriel Synnaeve, Yossi Adi and Alexandre Défossez. +1. **[MusicGen Melody](https://huggingface.co/docs/transformers/main/model_doc/musicgen_melody)** (from Meta) released with the paper [Simple and Controllable Music Generation](https://arxiv.org/abs/2306.05284) by Jade Copet, Felix Kreuk, Itai Gat, Tal Remez, David Kant, Gabriel Synnaeve, Yossi Adi and Alexandre Défossez. 1. **[MVP](https://huggingface.co/docs/transformers/model_doc/mvp)** (来自 中国人民大学 AI Box) 伴随论文 [MVP: Multi-task Supervised Pre-training for Natural Language Generation](https://arxiv.org/abs/2206.12131) 由 Tianyi Tang, Junyi Li, Wayne Xin Zhao and Ji-Rong Wen 发布。 1. **[NAT](https://huggingface.co/docs/transformers/model_doc/nat)** (来自 SHI Labs) 伴随论文 [Neighborhood Attention Transformer](https://arxiv.org/abs/2204.07143) 由 Ali Hassani, Steven Walton, Jiachen Li, Shen Li, and Humphrey Shi 发布。 1. **[Nezha](https://huggingface.co/docs/transformers/model_doc/nezha)** (来自华为诺亚方舟实验室) 伴随论文 [NEZHA: Neural Contextualized Representation for Chinese Language Understanding](https://arxiv.org/abs/1909.00204) 由 Junqiu Wei, Xiaozhe Ren, Xiaoguang Li, Wenyong Huang, Yi Liao, Yasheng Wang, Jiashu Lin, Xin Jiang, Xiao Chen and Qun Liu 发布。 diff --git a/README_zh-hant.md b/README_zh-hant.md index 085c37a162c842..fd306c6176fa9d 100644 --- a/README_zh-hant.md +++ b/README_zh-hant.md @@ -400,6 +400,7 @@ conda install conda-forge::transformers 1. **[MRA](https://huggingface.co/docs/transformers/model_doc/mra)** (from the University of Wisconsin - Madison) released with the paper [Multi Resolution Analysis (MRA)](https://arxiv.org/abs/2207.10284) by Zhanpeng Zeng, Sourav Pal, Jeffery Kline, Glenn M Fung, Vikas Singh. 1. **[MT5](https://huggingface.co/docs/transformers/model_doc/mt5)** (from Google AI) released with the paper [mT5: A massively multilingual pre-trained text-to-text transformer](https://arxiv.org/abs/2010.11934) by Linting Xue, Noah Constant, Adam Roberts, Mihir Kale, Rami Al-Rfou, Aditya Siddhant, Aditya Barua, Colin Raffel. 1. 
**[MusicGen](https://huggingface.co/docs/transformers/model_doc/musicgen)** (from Meta) released with the paper [Simple and Controllable Music Generation](https://arxiv.org/abs/2306.05284) by Jade Copet, Felix Kreuk, Itai Gat, Tal Remez, David Kant, Gabriel Synnaeve, Yossi Adi and Alexandre Défossez. +1. **[MusicGen Melody](https://huggingface.co/docs/transformers/main/model_doc/musicgen_melody)** (from Meta) released with the paper [Simple and Controllable Music Generation](https://arxiv.org/abs/2306.05284) by Jade Copet, Felix Kreuk, Itai Gat, Tal Remez, David Kant, Gabriel Synnaeve, Yossi Adi and Alexandre Défossez. 1. **[MVP](https://huggingface.co/docs/transformers/model_doc/mvp)** (from RUC AI Box) released with the paper [MVP: Multi-task Supervised Pre-training for Natural Language Generation](https://arxiv.org/abs/2206.12131) by Tianyi Tang, Junyi Li, Wayne Xin Zhao and Ji-Rong Wen. 1. **[NAT](https://huggingface.co/docs/transformers/model_doc/nat)** (from SHI Labs) released with the paper [Neighborhood Attention Transformer](https://arxiv.org/abs/2204.07143) by Ali Hassani, Steven Walton, Jiachen Li, Shen Li, and Humphrey Shi. 1. **[Nezha](https://huggingface.co/docs/transformers/model_doc/nezha)** (from Huawei Noah’s Ark Lab) released with the paper [NEZHA: Neural Contextualized Representation for Chinese Language Understanding](https://arxiv.org/abs/1909.00204) by Junqiu Wei, Xiaozhe Ren, Xiaoguang Li, Wenyong Huang, Yi Liao, Yasheng Wang, Jiashu Lin, Xin Jiang, Xiao Chen and Qun Liu. diff --git a/docs/source/en/_toctree.yml b/docs/source/en/_toctree.yml index e4ee69058bbc85..47c7ff6602c0e1 100644 --- a/docs/source/en/_toctree.yml +++ b/docs/source/en/_toctree.yml @@ -642,6 +642,8 @@ title: MMS - local: model_doc/musicgen title: MusicGen + - local: model_doc/musicgen_melody + title: MusicGen Melody - local: model_doc/pop2piano title: Pop2Piano - local: model_doc/seamless_m4t diff --git a/docs/source/en/index.md b/docs/source/en/index.md index 7f2a4bc35fec9a..ec8def2b2ef31b 100644 --- a/docs/source/en/index.md +++ b/docs/source/en/index.md @@ -207,6 +207,7 @@ Flax), PyTorch, and/or TensorFlow. | [MRA](model_doc/mra) | ✅ | ❌ | ❌ | | [MT5](model_doc/mt5) | ✅ | ✅ | ✅ | | [MusicGen](model_doc/musicgen) | ✅ | ❌ | ❌ | +| [MusicGen Melody](model_doc/musicgen_melody) | ✅ | ❌ | ❌ | | [MVP](model_doc/mvp) | ✅ | ❌ | ❌ | | [NAT](model_doc/nat) | ✅ | ❌ | ❌ | | [Nezha](model_doc/nezha) | ✅ | ❌ | ❌ | diff --git a/docs/source/en/model_doc/musicgen_melody.md b/docs/source/en/model_doc/musicgen_melody.md new file mode 100644 index 00000000000000..4d92d861f0bb5f --- /dev/null +++ b/docs/source/en/model_doc/musicgen_melody.md @@ -0,0 +1,288 @@ + + +# MusicGen Melody + +## Overview + +The MusicGen Melody model was proposed in [Simple and Controllable Music Generation](https://arxiv.org/abs/2306.05284) by Jade Copet, Felix Kreuk, Itai Gat, Tal Remez, David Kant, Gabriel Synnaeve, Yossi Adi and Alexandre Défossez. + +MusicGen Melody is a single stage auto-regressive Transformer model capable of generating high-quality music samples conditioned on text descriptions or audio prompts. The text descriptions are passed through a frozen text encoder model to obtain a sequence of hidden-state representations. MusicGen is then trained to predict discrete audio tokens, or *audio codes*, conditioned on these hidden-states. These audio tokens are then decoded using an audio compression model, such as EnCodec, to recover the audio waveform. 
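+As a quick orientation, the following is a minimal text-to-audio sketch of this pipeline; the checkpoint name, prompt and generation arguments simply mirror the examples worked through in the Generation section below.
+
+```python
+>>> from transformers import AutoProcessor, MusicgenMelodyForConditionalGeneration
+>>> import soundfile as sf
+
+>>> processor = AutoProcessor.from_pretrained("facebook/musicgen-melody")
+>>> model = MusicgenMelodyForConditionalGeneration.from_pretrained("facebook/musicgen-melody")
+
+>>> # text prompt -> frozen text encoder -> decoder predicts audio codes -> codes decoded back to a waveform
+>>> inputs = processor(text=["80s blues track with groovy saxophone"], padding=True, return_tensors="pt")
+>>> audio_values = model.generate(**inputs, do_sample=True, guidance_scale=3, max_new_tokens=256)
+
+>>> # the generated waveform is sampled at the rate of the audio compression model
+>>> sampling_rate = model.config.audio_encoder.sampling_rate
+>>> sf.write("musicgen_out.wav", audio_values[0].T.numpy(), sampling_rate)
+```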
+ +Through an efficient token interleaving pattern, MusicGen does not require a self-supervised semantic representation of the text/audio prompts, thus eliminating the need to cascade multiple models to predict a set of codebooks (e.g. hierarchically or upsampling). Instead, it is able to generate all the codebooks in a single forward pass. + +The abstract from the paper is the following: + +*We tackle the task of conditional music generation. We introduce MusicGen, a single Language Model (LM) that operates over several streams of compressed discrete music representation, i.e., tokens. Unlike prior work, MusicGen is comprised of a single-stage transformer LM together with efficient token interleaving patterns, which eliminates the need for cascading several models, e.g., hierarchically or upsampling. Following this approach, we demonstrate how MusicGen can generate high-quality samples, while being conditioned on textual description or melodic features, allowing better controls over the generated output. We conduct extensive empirical evaluation, considering both automatic and human studies, showing the proposed approach is superior to the evaluated baselines on a standard text-to-music benchmark. Through ablation studies, we shed light over the importance of each of the components comprising MusicGen.* + + +This model was contributed by [ylacombe](https://huggingface.co/ylacombe). The original code can be found [here](https://github.com/facebookresearch/audiocraft). The pre-trained checkpoints can be found on the [Hugging Face Hub](https://huggingface.co/models?sort=downloads&search=facebook%2Fmusicgen). + + +## Difference with [MusicGen](https://huggingface.co/docs/transformers/main/en/model_doc/musicgen) + +There are two key differences with MusicGen: +1. The audio prompt is used here as a conditional signal for the generated audio sample, whereas it's used for audio continuation in [MusicGen](https://huggingface.co/docs/transformers/main/en/model_doc/musicgen). +2. Conditional text and audio signals are concatenated to the decoder's hidden states instead of being used as a cross-attention signal, as in MusicGen. + +## Generation + +MusicGen Melody is compatible with two generation modes: greedy and sampling. In practice, sampling leads to significantly better results than greedy, thus we encourage sampling mode to be used where possible. Sampling is enabled by default, and can be explicitly specified by setting `do_sample=True` in the call to [`MusicgenMelodyForConditionalGeneration.generate`], or by overriding the model's generation config (see below). + +Transformers supports both mono (1-channel) and stereo (2-channel) variants of MusicGen Melody. The mono channel versions generate a single set of codebooks. The stereo versions generate 2 sets of codebooks, 1 for each channel (left/right), and each set of codebooks is decoded independently through the audio compression model. The audio streams for each channel are combined to give the final stereo output. + + +#### Audio Conditional Generation + +The model can generate an audio sample conditioned on a text and an audio prompt through use of the [`MusicgenMelodyProcessor`] to pre-process the inputs. 
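+Before diving into the full examples, the sketch below shows what the processor is expected to return when given both modalities: the text is tokenized, and the audio is converted into the chromagram `input_features` used as the melody conditioning signal. The silent dummy waveform and the `processor.feature_extractor.sampling_rate` access are illustrative assumptions only; real melody prompts are prepared as in the examples that follow.
+
+```python
+>>> from transformers import AutoProcessor
+>>> import numpy as np
+
+>>> processor = AutoProcessor.from_pretrained("facebook/musicgen-melody")
+
+>>> # one second of silence, purely to illustrate the (audio, sampling_rate, text) call signature
+>>> sampling_rate = processor.feature_extractor.sampling_rate  # assumed attribute access
+>>> dummy_audio = np.zeros(sampling_rate, dtype=np.float32)
+
+>>> inputs = processor(
+...     audio=dummy_audio,
+...     sampling_rate=sampling_rate,
+...     text=["80s blues track with groovy saxophone"],
+...     padding=True,
+...     return_tensors="pt",
+... )
+>>> list(inputs.keys())  # should include the tokenized text and the chroma `input_features`
+```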
+ +In the following examples, we load an audio file using the 🤗 Datasets library, which can be pip installed through the command below: + +``` +pip install --upgrade pip +pip install datasets[audio] +``` + +The audio file we are about to use is loaded as follows: +```python +>>> from datasets import load_dataset + +>>> dataset = load_dataset("sanchit-gandhi/gtzan", split="train", streaming=True) +>>> sample = next(iter(dataset))["audio"] +``` + +The audio prompt should ideally be free of the low-frequency signals usually produced by instruments such as drums and bass. The [Demucs](https://github.com/adefossez/demucs/tree/main) model can be used to separate vocals and other signals from the drums and bass components. + +If you wish to use Demucs, you first need to follow the installation steps [here](https://github.com/adefossez/demucs/tree/main?tab=readme-ov-file#for-musicians) before using the following snippet: + +```python +from demucs import pretrained +from demucs.apply import apply_model +from demucs.audio import convert_audio +import torch + + +wav = torch.tensor(sample["array"]).to(torch.float32) + +demucs = pretrained.get_model('htdemucs') + +wav = convert_audio(wav[None], sample["sampling_rate"], demucs.samplerate, demucs.audio_channels) +wav = apply_model(demucs, wav[None]) +``` + +You can then use the following snippet to generate music: + +```python +>>> from transformers import AutoProcessor, MusicgenMelodyForConditionalGeneration + +>>> processor = AutoProcessor.from_pretrained("facebook/musicgen-melody") +>>> model = MusicgenMelodyForConditionalGeneration.from_pretrained("facebook/musicgen-melody") + +>>> inputs = processor( +... audio=wav, +... sampling_rate=demucs.samplerate, +... text=["80s blues track with groovy saxophone"], +... padding=True, +... return_tensors="pt", +... ) +>>> audio_values = model.generate(**inputs, do_sample=True, guidance_scale=3, max_new_tokens=256) +``` + +You can also pass the audio signal directly without using Demucs, although the quality of the generation will probably be degraded: + +```python +>>> from transformers import AutoProcessor, MusicgenMelodyForConditionalGeneration + +>>> processor = AutoProcessor.from_pretrained("facebook/musicgen-melody") +>>> model = MusicgenMelodyForConditionalGeneration.from_pretrained("facebook/musicgen-melody") + +>>> inputs = processor( +... audio=sample["array"], +... sampling_rate=sample["sampling_rate"], +... text=["80s blues track with groovy saxophone"], +... padding=True, +... return_tensors="pt", +... ) +>>> audio_values = model.generate(**inputs, do_sample=True, guidance_scale=3, max_new_tokens=256) +``` + +The audio outputs are a three-dimensional Torch tensor of shape `(batch_size, num_channels, sequence_length)`. To listen to the generated audio samples, you can either play them in an ipynb notebook: + +```python +from IPython.display import Audio + +sampling_rate = model.config.audio_encoder.sampling_rate +Audio(audio_values[0].numpy(), rate=sampling_rate) +``` + +Or save them as a `.wav` file using a third-party library, e.g. `soundfile`: + +```python +>>> import soundfile as sf + +>>> sampling_rate = model.config.audio_encoder.sampling_rate +>>> sf.write("musicgen_out.wav", audio_values[0].T.numpy(), sampling_rate) +``` + + +### Text-only Conditional Generation + +The same [`MusicgenMelodyProcessor`] can be used to pre-process a text-only prompt. 
+ +```python +>>> from transformers import AutoProcessor, MusicgenMelodyForConditionalGeneration + +>>> processor = AutoProcessor.from_pretrained("facebook/musicgen-melody") +>>> model = MusicgenMelodyForConditionalGeneration.from_pretrained("facebook/musicgen-melody") + +>>> inputs = processor( +... text=["80s pop track with bassy drums and synth", "90s rock song with loud guitars and heavy drums"], +... padding=True, +... return_tensors="pt", +... ) +>>> audio_values = model.generate(**inputs, do_sample=True, guidance_scale=3, max_new_tokens=256) +``` + +The `guidance_scale` is used in classifier free guidance (CFG), setting the weighting between the conditional logits (which are predicted from the text prompts) and the unconditional logits (which are predicted from an unconditional or 'null' prompt). Higher guidance scale encourages the model to generate samples that are more closely linked to the input prompt, usually at the expense of poorer audio quality. CFG is enabled by setting `guidance_scale > 1`. For best results, use `guidance_scale=3` (default). + + +You can also generate in batch: + +```python +>>> from transformers import AutoProcessor, MusicgenMelodyForConditionalGeneration +>>> from datasets import load_dataset + +>>> processor = AutoProcessor.from_pretrained("facebook/musicgen-melody") +>>> model = MusicgenMelodyForConditionalGeneration.from_pretrained("facebook/musicgen-melody") + +>>> # take the first quarter of the audio sample +>>> sample_1 = sample["array"][: len(sample["array"]) // 4] + +>>> # take the first half of the audio sample +>>> sample_2 = sample["array"][: len(sample["array"]) // 2] + +>>> inputs = processor( +... audio=[sample_1, sample_2], +... sampling_rate=sample["sampling_rate"], +... text=["80s blues track with groovy saxophone", "90s rock song with loud guitars and heavy drums"], +... padding=True, +... return_tensors="pt", +... 
) +>>> audio_values = model.generate(**inputs, do_sample=True, guidance_scale=3, max_new_tokens=256) +``` + +### Unconditional Generation + +The inputs for unconditional (or 'null') generation can be obtained through the method [`MusicgenMelodyProcessor.get_unconditional_inputs`]: + +```python +>>> from transformers import MusicgenMelodyForConditionalGeneration, MusicgenMelodyProcessor + +>>> model = MusicgenMelodyForConditionalGeneration.from_pretrained("facebook/musicgen-melody") +>>> unconditional_inputs = MusicgenMelodyProcessor.from_pretrained("facebook/musicgen-melody").get_unconditional_inputs(num_samples=1) + +>>> audio_values = model.generate(**unconditional_inputs, do_sample=True, max_new_tokens=256) +``` + +### Generation Configuration + +The default parameters that control the generation process, such as sampling, guidance scale and number of generated tokens, can be found in the model's generation config, and updated as desired: + +```python +>>> from transformers import MusicgenMelodyForConditionalGeneration + +>>> model = MusicgenMelodyForConditionalGeneration.from_pretrained("facebook/musicgen-melody") + +>>> # inspect the default generation config +>>> model.generation_config + +>>> # increase the guidance scale to 4.0 +>>> model.generation_config.guidance_scale = 4.0 + +>>> # decrease the max length to 256 tokens +>>> model.generation_config.max_length = 256 +``` + +Note that any arguments passed to the generate method will **supersede** those in the generation config, so setting `do_sample=False` in the call to generate will supersede the setting of `model.generation_config.do_sample` in the generation config. + +## Model Structure + +The MusicGen model can be de-composed into three distinct stages: +1. Text encoder: maps the text inputs to a sequence of hidden-state representations. The pre-trained MusicGen models use a frozen text encoder from either T5 or Flan-T5. +2. MusicGen Melody decoder: a language model (LM) that auto-regressively generates audio tokens (or codes) conditional on the encoder hidden-state representations +3. Audio decoder: used to recover the audio waveform from the audio tokens predicted by the decoder. + +Thus, the MusicGen model can either be used as a standalone decoder model, corresponding to the class [`MusicgenMelodyForCausalLM`], or as a composite model that includes the text encoder and audio encoder, corresponding to the class [`MusicgenMelodyForConditionalGeneration`]. If only the decoder needs to be loaded from the pre-trained checkpoint, it can be loaded by first specifying the correct config, or be accessed through the `.decoder` attribute of the composite model: + +```python +>>> from transformers import AutoConfig, MusicgenMelodyForCausalLM, MusicgenMelodyForConditionalGeneration + +>>> # Option 1: get decoder config and pass to `.from_pretrained` +>>> decoder_config = AutoConfig.from_pretrained("facebook/musicgen-melody").decoder +>>> decoder = MusicgenMelodyForCausalLM.from_pretrained("facebook/musicgen-melody", **decoder_config.to_dict()) + +>>> # Option 2: load the entire composite model, but only return the decoder +>>> decoder = MusicgenMelodyForConditionalGeneration.from_pretrained("facebook/musicgen-melody").decoder +``` + +Since the text encoder and audio encoder models are frozen during training, the MusicGen decoder [`MusicgenMelodyForCausalLM`] can be trained standalone on a dataset of encoder hidden-states and audio codes. 
For inference, the trained decoder can be combined with the frozen text encoder and audio encoder to recover the composite [`MusicgenMelodyForConditionalGeneration`] model. + +## Checkpoint Conversion + +- After downloading the original checkpoints from [here](https://github.com/facebookresearch/audiocraft/blob/main/docs/MUSICGEN.md#importing--exporting-models), you can convert them using the **conversion script** available at `src/transformers/models/musicgen_melody/convert_musicgen_melody_transformers.py` with the following command: + +```bash +python src/transformers/models/musicgen_melody/convert_musicgen_melody_transformers.py \ + --checkpoint="facebook/musicgen-melody" --pytorch_dump_folder /output/path +``` + +Tips: +* MusicGen is trained on the 32kHz checkpoint of Encodec. You should ensure you use a compatible version of the Encodec model. +* Sampling mode tends to deliver better results than greedy - you can toggle sampling with the variable `do_sample` in the call to [`MusicgenMelodyForConditionalGeneration.generate`] + + +## MusicgenMelodyDecoderConfig + +[[autodoc]] MusicgenMelodyDecoderConfig + +## MusicgenMelodyProcessor + +[[autodoc]] MusicgenMelodyProcessor + - get_unconditional_inputs + +## MusicgenMelodyFeatureExtractor + +[[autodoc]] MusicgenMelodyFeatureExtractor + - _extract_stem_indices + +## MusicgenMelodyConfig + +[[autodoc]] MusicgenMelodyConfig + +## MusicgenMelodyModel + +[[autodoc]] MusicgenMelodyModel + - forward + +## MusicgenMelodyForCausalLM + +[[autodoc]] MusicgenMelodyForCausalLM + - forward + +## MusicgenMelodyForConditionalGeneration + +[[autodoc]] MusicgenMelodyForConditionalGeneration + - forward \ No newline at end of file diff --git a/docs/source/en/tasks/language_modeling.md b/docs/source/en/tasks/language_modeling.md index 0de54b23331eb7..9c055ae5f59f63 100644 --- a/docs/source/en/tasks/language_modeling.md +++ b/docs/source/en/tasks/language_modeling.md @@ -37,7 +37,7 @@ You can finetune other architectures for causal language modeling following the Choose one of the following architectures: -[BART](../model_doc/bart), [BERT](../model_doc/bert), [Bert Generation](../model_doc/bert-generation), [BigBird](../model_doc/big_bird), [BigBird-Pegasus](../model_doc/bigbird_pegasus), [BioGpt](../model_doc/biogpt), [Blenderbot](../model_doc/blenderbot), [BlenderbotSmall](../model_doc/blenderbot-small), [BLOOM](../model_doc/bloom), [CamemBERT](../model_doc/camembert), [CodeLlama](../model_doc/code_llama), [CodeGen](../model_doc/codegen), [Cohere](../model_doc/cohere), [CPM-Ant](../model_doc/cpmant), [CTRL](../model_doc/ctrl), [Data2VecText](../model_doc/data2vec-text), [ELECTRA](../model_doc/electra), [ERNIE](../model_doc/ernie), [Falcon](../model_doc/falcon), [Fuyu](../model_doc/fuyu), [Gemma](../model_doc/gemma), [GIT](../model_doc/git), [GPT-Sw3](../model_doc/gpt-sw3), [OpenAI GPT-2](../model_doc/gpt2), [GPTBigCode](../model_doc/gpt_bigcode), [GPT Neo](../model_doc/gpt_neo), [GPT NeoX](../model_doc/gpt_neox), [GPT NeoX Japanese](../model_doc/gpt_neox_japanese), [GPT-J](../model_doc/gptj), [LLaMA](../model_doc/llama), [Mamba](../model_doc/mamba), [Marian](../model_doc/marian), [mBART](../model_doc/mbart), [MEGA](../model_doc/mega), [Megatron-BERT](../model_doc/megatron-bert), [Mistral](../model_doc/mistral), [Mixtral](../model_doc/mixtral), [MPT](../model_doc/mpt), [MusicGen](../model_doc/musicgen), [MVP](../model_doc/mvp), [OpenLlama](../model_doc/open-llama), [OpenAI GPT](../model_doc/openai-gpt), [OPT](../model_doc/opt), 
[Pegasus](../model_doc/pegasus), [Persimmon](../model_doc/persimmon), [Phi](../model_doc/phi), [PLBart](../model_doc/plbart), [ProphetNet](../model_doc/prophetnet), [QDQBert](../model_doc/qdqbert), [Qwen2](../model_doc/qwen2), [Reformer](../model_doc/reformer), [RemBERT](../model_doc/rembert), [RoBERTa](../model_doc/roberta), [RoBERTa-PreLayerNorm](../model_doc/roberta-prelayernorm), [RoCBert](../model_doc/roc_bert), [RoFormer](../model_doc/roformer), [RWKV](../model_doc/rwkv), [Speech2Text2](../model_doc/speech_to_text_2), [StableLm](../model_doc/stablelm), [Starcoder2](../model_doc/starcoder2), [Transformer-XL](../model_doc/transfo-xl), [TrOCR](../model_doc/trocr), [Whisper](../model_doc/whisper), [XGLM](../model_doc/xglm), [XLM](../model_doc/xlm), [XLM-ProphetNet](../model_doc/xlm-prophetnet), [XLM-RoBERTa](../model_doc/xlm-roberta), [XLM-RoBERTa-XL](../model_doc/xlm-roberta-xl), [XLNet](../model_doc/xlnet), [X-MOD](../model_doc/xmod) +[BART](../model_doc/bart), [BERT](../model_doc/bert), [Bert Generation](../model_doc/bert-generation), [BigBird](../model_doc/big_bird), [BigBird-Pegasus](../model_doc/bigbird_pegasus), [BioGpt](../model_doc/biogpt), [Blenderbot](../model_doc/blenderbot), [BlenderbotSmall](../model_doc/blenderbot-small), [BLOOM](../model_doc/bloom), [CamemBERT](../model_doc/camembert), [CodeLlama](../model_doc/code_llama), [CodeGen](../model_doc/codegen), [Cohere](../model_doc/cohere), [CPM-Ant](../model_doc/cpmant), [CTRL](../model_doc/ctrl), [Data2VecText](../model_doc/data2vec-text), [ELECTRA](../model_doc/electra), [ERNIE](../model_doc/ernie), [Falcon](../model_doc/falcon), [Fuyu](../model_doc/fuyu), [Gemma](../model_doc/gemma), [GIT](../model_doc/git), [GPT-Sw3](../model_doc/gpt-sw3), [OpenAI GPT-2](../model_doc/gpt2), [GPTBigCode](../model_doc/gpt_bigcode), [GPT Neo](../model_doc/gpt_neo), [GPT NeoX](../model_doc/gpt_neox), [GPT NeoX Japanese](../model_doc/gpt_neox_japanese), [GPT-J](../model_doc/gptj), [LLaMA](../model_doc/llama), [Mamba](../model_doc/mamba), [Marian](../model_doc/marian), [mBART](../model_doc/mbart), [MEGA](../model_doc/mega), [Megatron-BERT](../model_doc/megatron-bert), [Mistral](../model_doc/mistral), [Mixtral](../model_doc/mixtral), [MPT](../model_doc/mpt), [MusicGen](../model_doc/musicgen), [MusicGen Melody](../model_doc/musicgen_melody), [MVP](../model_doc/mvp), [OpenLlama](../model_doc/open-llama), [OpenAI GPT](../model_doc/openai-gpt), [OPT](../model_doc/opt), [Pegasus](../model_doc/pegasus), [Persimmon](../model_doc/persimmon), [Phi](../model_doc/phi), [PLBart](../model_doc/plbart), [ProphetNet](../model_doc/prophetnet), [QDQBert](../model_doc/qdqbert), [Qwen2](../model_doc/qwen2), [Reformer](../model_doc/reformer), [RemBERT](../model_doc/rembert), [RoBERTa](../model_doc/roberta), [RoBERTa-PreLayerNorm](../model_doc/roberta-prelayernorm), [RoCBert](../model_doc/roc_bert), [RoFormer](../model_doc/roformer), [RWKV](../model_doc/rwkv), [Speech2Text2](../model_doc/speech_to_text_2), [StableLm](../model_doc/stablelm), [Starcoder2](../model_doc/starcoder2), [Transformer-XL](../model_doc/transfo-xl), [TrOCR](../model_doc/trocr), [Whisper](../model_doc/whisper), [XGLM](../model_doc/xglm), [XLM](../model_doc/xlm), [XLM-ProphetNet](../model_doc/xlm-prophetnet), [XLM-RoBERTa](../model_doc/xlm-roberta), [XLM-RoBERTa-XL](../model_doc/xlm-roberta-xl), [XLNet](../model_doc/xlnet), [X-MOD](../model_doc/xmod) diff --git a/src/transformers/__init__.py b/src/transformers/__init__.py index a9c37c07a1be2d..631276d3c19b3b 100644 --- 
a/src/transformers/__init__.py +++ b/src/transformers/__init__.py @@ -42,6 +42,7 @@ is_timm_available, is_tokenizers_available, is_torch_available, + is_torchaudio_available, is_torchvision_available, is_vision_available, logging, @@ -638,6 +639,11 @@ "MusicgenConfig", "MusicgenDecoderConfig", ], + "models.musicgen_melody": [ + "MUSICGEN_MELODY_PRETRAINED_MODEL_ARCHIVE_LIST", + "MusicgenMelodyConfig", + "MusicgenMelodyDecoderConfig", + ], "models.mvp": ["MvpConfig", "MvpTokenizer"], "models.nat": ["NAT_PRETRAINED_CONFIG_ARCHIVE_MAP", "NatConfig"], "models.nezha": ["NEZHA_PRETRAINED_CONFIG_ARCHIVE_MAP", "NezhaConfig"], @@ -2787,6 +2793,15 @@ "MusicgenProcessor", ] ) + _import_structure["models.musicgen_melody"].extend( + [ + "MUSICGEN_MELODY_PRETRAINED_MODEL_ARCHIVE_LIST", + "MusicgenMelodyForCausalLM", + "MusicgenMelodyForConditionalGeneration", + "MusicgenMelodyModel", + "MusicgenMelodyPreTrainedModel", + ] + ) _import_structure["models.mvp"].extend( [ "MVP_PRETRAINED_MODEL_ARCHIVE_LIST", @@ -4552,6 +4567,21 @@ _import_structure["models.pop2piano"].append("Pop2PianoTokenizer") _import_structure["models.pop2piano"].append("Pop2PianoProcessor") +try: + if not is_torchaudio_available(): + raise OptionalDependencyNotAvailable() +except OptionalDependencyNotAvailable: + from .utils import ( + dummy_torchaudio_objects, + ) + + _import_structure["utils.dummy_torchaudio_objects"] = [ + name for name in dir(dummy_torchaudio_objects) if not name.startswith("_") + ] +else: + _import_structure["models.musicgen_melody"].append("MusicgenMelodyFeatureExtractor") + _import_structure["models.musicgen_melody"].append("MusicgenMelodyProcessor") + # FLAX-backed objects try: @@ -5459,6 +5489,11 @@ MusicgenConfig, MusicgenDecoderConfig, ) + from .models.musicgen_melody import ( + MUSICGEN_MELODY_PRETRAINED_MODEL_ARCHIVE_LIST, + MusicgenMelodyConfig, + MusicgenMelodyDecoderConfig, + ) from .models.mvp import MvpConfig, MvpTokenizer from .models.nat import NAT_PRETRAINED_CONFIG_ARCHIVE_MAP, NatConfig from .models.nezha import NEZHA_PRETRAINED_CONFIG_ARCHIVE_MAP, NezhaConfig @@ -7360,6 +7395,13 @@ MusicgenPreTrainedModel, MusicgenProcessor, ) + from .models.musicgen_melody import ( + MUSICGEN_MELODY_PRETRAINED_MODEL_ARCHIVE_LIST, + MusicgenMelodyForCausalLM, + MusicgenMelodyForConditionalGeneration, + MusicgenMelodyModel, + MusicgenMelodyPreTrainedModel, + ) from .models.mvp import ( MVP_PRETRAINED_MODEL_ARCHIVE_LIST, MvpForCausalLM, @@ -8811,6 +8853,13 @@ Pop2PianoTokenizer, ) + try: + if not is_torchaudio_available(): + raise OptionalDependencyNotAvailable() + except OptionalDependencyNotAvailable: + from .utils.dummy_torchaudio_objects import * + else: + from .models.musicgen_melody import MusicgenMelodyFeatureExtractor, MusicgenMelodyProcessor try: if not is_flax_available(): raise OptionalDependencyNotAvailable() diff --git a/src/transformers/audio_utils.py b/src/transformers/audio_utils.py index a76e671712f40d..c5c73550c1c346 100644 --- a/src/transformers/audio_utils.py +++ b/src/transformers/audio_utils.py @@ -17,7 +17,7 @@ and remove unnecessary dependencies. """ import warnings -from typing import Optional, Union +from typing import Optional, Tuple, Union import numpy as np @@ -94,6 +94,29 @@ def mel_to_hertz(mels: Union[float, np.ndarray], mel_scale: str = "htk") -> Unio return freq +def hertz_to_octave( + freq: Union[float, np.ndarray], tuning: Optional[float] = 0.0, bins_per_octave: Optional[int] = 12 +): + """ + Convert frequency from hertz to fractional octave numbers. + Adapted from *librosa*. 
+ + Args: + freq (`float` or `np.ndarray`): + The frequency, or multiple frequencies, in hertz (Hz). + tuning (`float`, defaults to `0.`): + Tuning deviation from the Stuttgart pitch (A440) in (fractional) bins per octave. + bins_per_octave (`int`, defaults to `12`): + Number of bins per octave. + + Returns: + `float` or `np.ndarray`: The frequencies on the octave scale. + """ + stuttgart_pitch = 440.0 * 2.0 ** (tuning / bins_per_octave) + octave = np.log2(freq / (float(stuttgart_pitch) / 16)) + return octave + + def _create_triangular_filter_bank(fft_freqs: np.ndarray, filter_freqs: np.ndarray) -> np.ndarray: """ Creates a triangular filter bank. @@ -116,6 +139,81 @@ def _create_triangular_filter_bank(fft_freqs: np.ndarray, filter_freqs: np.ndarr return np.maximum(np.zeros(1), np.minimum(down_slopes, up_slopes)) +def chroma_filter_bank( + num_frequency_bins: int, + num_chroma: int, + sampling_rate: int, + tuning: float = 0.0, + power: Optional[float] = 2.0, + weighting_parameters: Optional[Tuple[float]] = (5.0, 2), + start_at_c_chroma: Optional[bool] = True, +): + """ + Creates a chroma filter bank, i.e. a linear transformation to project spectrogram bins onto chroma bins. + + Adapted from *librosa*. + + Args: + num_frequency_bins (`int`): + Number of frequencies used to compute the spectrogram (should be the same as in `stft`). + num_chroma (`int`): + Number of chroma bins (i.e. pitch classes). + sampling_rate (`int`): + Sample rate of the audio waveform. + tuning (`float`): + Tuning deviation from A440 in fractions of a chroma bin. + power (`float`, *optional*, defaults to 2.0): + If 2.0, normalizes each column with its L2 norm. If 1.0, normalizes each column with its L1 norm. + weighting_parameters (`Tuple[float]`, *optional*, defaults to `(5., 2.)`): + If specified, apply a Gaussian weighting parameterized by the first element of the tuple being the center and + the second element being the Gaussian half-width. + start_at_c_chroma (`bool`, *optional*, defaults to `True`): + If `True`, the filter bank will start at the 'C' pitch class. Otherwise, it will start at 'A'. + Returns: + `np.ndarray` of shape `(num_frequency_bins, num_chroma)` + """ + # Get the FFT bins, not counting the DC component + frequencies = np.linspace(0, sampling_rate, num_frequency_bins, endpoint=False)[1:] + + freq_bins = num_chroma * hertz_to_octave(frequencies, tuning=tuning, bins_per_octave=num_chroma) + + # make up a value for the 0 Hz bin = 1.5 octaves below bin 1 + # (so chroma is 50% rotated from bin 1, and bin width is broad) + freq_bins = np.concatenate(([freq_bins[0] - 1.5 * num_chroma], freq_bins)) + + bins_width = np.concatenate((np.maximum(freq_bins[1:] - freq_bins[:-1], 1.0), [1])) + + chroma_filters = np.subtract.outer(freq_bins, np.arange(0, num_chroma, dtype="d")).T + + num_chroma2 = np.round(float(num_chroma) / 2) + + # Project into range -num_chroma/2 ..
num_chroma/2 + # add on fixed offset of 10*num_chroma to ensure all values passed to + # rem are positive + chroma_filters = np.remainder(chroma_filters + num_chroma2 + 10 * num_chroma, num_chroma) - num_chroma2 + + # Gaussian bumps - 2*D to make them narrower + chroma_filters = np.exp(-0.5 * (2 * chroma_filters / np.tile(bins_width, (num_chroma, 1))) ** 2) + + # normalize each column + if power is not None: + chroma_filters = chroma_filters / np.sum(chroma_filters**power, axis=0, keepdims=True) ** (1.0 / power) + + # Maybe apply scaling for fft bins + if weighting_parameters is not None: + center, half_width = weighting_parameters + chroma_filters *= np.tile( + np.exp(-0.5 * (((freq_bins / num_chroma - center) / half_width) ** 2)), + (num_chroma, 1), + ) + + if start_at_c_chroma: + chroma_filters = np.roll(chroma_filters, -3 * (num_chroma // 12), axis=0) + + # remove aliasing columns, copy to ensure row-contiguity + return np.ascontiguousarray(chroma_filters[:, : int(1 + num_frequency_bins / 2)]) + + def mel_filter_bank( num_frequency_bins: int, num_mel_filters: int, diff --git a/src/transformers/models/__init__.py b/src/transformers/models/__init__.py index dfd887e3d06104..d0f3ba2688e207 100644 --- a/src/transformers/models/__init__.py +++ b/src/transformers/models/__init__.py @@ -153,6 +153,7 @@ mra, mt5, musicgen, + musicgen_melody, mvp, nat, nezha, diff --git a/src/transformers/models/auto/configuration_auto.py b/src/transformers/models/auto/configuration_auto.py index 626a267a805ec4..cd5be302c42986 100755 --- a/src/transformers/models/auto/configuration_auto.py +++ b/src/transformers/models/auto/configuration_auto.py @@ -161,6 +161,7 @@ ("mra", "MraConfig"), ("mt5", "MT5Config"), ("musicgen", "MusicgenConfig"), + ("musicgen_melody", "MusicgenMelodyConfig"), ("mvp", "MvpConfig"), ("nat", "NatConfig"), ("nezha", "NezhaConfig"), @@ -396,6 +397,7 @@ ("mpt", "MPT_PRETRAINED_CONFIG_ARCHIVE_MAP"), ("mra", "MRA_PRETRAINED_CONFIG_ARCHIVE_MAP"), ("musicgen", "MUSICGEN_PRETRAINED_CONFIG_ARCHIVE_MAP"), + ("musicgen_melody", "MUSICGEN_MELODY_PRETRAINED_CONFIG_ARCHIVE_MAP"), ("mvp", "MVP_PRETRAINED_CONFIG_ARCHIVE_MAP"), ("nat", "NAT_PRETRAINED_CONFIG_ARCHIVE_MAP"), ("nezha", "NEZHA_PRETRAINED_CONFIG_ARCHIVE_MAP"), @@ -649,6 +651,7 @@ ("mra", "MRA"), ("mt5", "MT5"), ("musicgen", "MusicGen"), + ("musicgen_melody", "MusicGen Melody"), ("mvp", "MVP"), ("nat", "NAT"), ("nezha", "Nezha"), diff --git a/src/transformers/models/auto/modeling_auto.py b/src/transformers/models/auto/modeling_auto.py index e05204561a18dd..d2cb6882cd351c 100755 --- a/src/transformers/models/auto/modeling_auto.py +++ b/src/transformers/models/auto/modeling_auto.py @@ -454,6 +454,7 @@ ("mixtral", "MixtralForCausalLM"), ("mpt", "MptForCausalLM"), ("musicgen", "MusicgenForCausalLM"), + ("musicgen_melody", "MusicgenMelodyForCausalLM"), ("mvp", "MvpForCausalLM"), ("open-llama", "OpenLlamaForCausalLM"), ("openai-gpt", "OpenAIGPTLMHeadModel"), @@ -1176,6 +1177,7 @@ ("bark", "BarkModel"), ("fastspeech2_conformer", "FastSpeech2ConformerWithHifiGan"), ("musicgen", "MusicgenForConditionalGeneration"), + ("musicgen_melody", "MusicgenMelodyForConditionalGeneration"), ("seamless_m4t", "SeamlessM4TForTextToSpeech"), ("seamless_m4t_v2", "SeamlessM4Tv2ForTextToSpeech"), ("vits", "VitsModel"), diff --git a/src/transformers/models/auto/tokenization_auto.py b/src/transformers/models/auto/tokenization_auto.py index 10948d4ef7b3f7..ca337ec0272e3a 100644 --- a/src/transformers/models/auto/tokenization_auto.py +++ 
b/src/transformers/models/auto/tokenization_auto.py @@ -280,6 +280,7 @@ ), ), ("musicgen", ("T5Tokenizer", "T5TokenizerFast" if is_tokenizers_available() else None)), + ("musicgen_melody", ("T5Tokenizer", "T5TokenizerFast" if is_tokenizers_available() else None)), ("mvp", ("MvpTokenizer", "MvpTokenizerFast" if is_tokenizers_available() else None)), ("nezha", ("BertTokenizer", "BertTokenizerFast" if is_tokenizers_available() else None)), ( diff --git a/src/transformers/models/musicgen_melody/__init__.py b/src/transformers/models/musicgen_melody/__init__.py new file mode 100644 index 00000000000000..082c8f4ea66ea4 --- /dev/null +++ b/src/transformers/models/musicgen_melody/__init__.py @@ -0,0 +1,90 @@ +# Copyright 2024 The HuggingFace Team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +from typing import TYPE_CHECKING + +from ...utils import ( + OptionalDependencyNotAvailable, + _LazyModule, + is_torch_available, + is_torchaudio_available, +) + + +_import_structure = { + "configuration_musicgen_melody": [ + "MUSICGEN_MELODY_PRETRAINED_CONFIG_ARCHIVE_MAP", + "MusicgenMelodyConfig", + "MusicgenMelodyDecoderConfig", + ], +} + +try: + if not is_torch_available(): + raise OptionalDependencyNotAvailable() +except OptionalDependencyNotAvailable: + pass +else: + _import_structure["modeling_musicgen_melody"] = [ + "MUSICGEN_MELODY_PRETRAINED_MODEL_ARCHIVE_LIST", + "MusicgenMelodyForConditionalGeneration", + "MusicgenMelodyForCausalLM", + "MusicgenMelodyModel", + "MusicgenMelodyPreTrainedModel", + ] + +try: + if not is_torchaudio_available(): + raise OptionalDependencyNotAvailable() +except OptionalDependencyNotAvailable: + pass +else: + _import_structure["feature_extraction_musicgen_melody"] = ["MusicgenMelodyFeatureExtractor"] + _import_structure["processing_musicgen_melody"] = ["MusicgenMelodyProcessor"] + + +if TYPE_CHECKING: + from .configuration_musicgen_melody import ( + MUSICGEN_MELODY_PRETRAINED_CONFIG_ARCHIVE_MAP, + MusicgenMelodyConfig, + MusicgenMelodyDecoderConfig, + ) + + try: + if not is_torch_available(): + raise OptionalDependencyNotAvailable() + except OptionalDependencyNotAvailable: + pass + else: + from .modeling_musicgen_melody import ( + MUSICGEN_MELODY_PRETRAINED_MODEL_ARCHIVE_LIST, + MusicgenMelodyForCausalLM, + MusicgenMelodyForConditionalGeneration, + MusicgenMelodyModel, + MusicgenMelodyPreTrainedModel, + ) + + try: + if not is_torchaudio_available(): + raise OptionalDependencyNotAvailable() + except OptionalDependencyNotAvailable: + pass + else: + from .feature_extraction_musicgen_melody import MusicgenMelodyFeatureExtractor + from .processing_musicgen_melody import MusicgenMelodyProcessor + + +else: + import sys + + sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__) diff --git a/src/transformers/models/musicgen_melody/configuration_musicgen_melody.py b/src/transformers/models/musicgen_melody/configuration_musicgen_melody.py new file mode 100644 index 00000000000000..89459371299ff7 --- /dev/null 
+++ b/src/transformers/models/musicgen_melody/configuration_musicgen_melody.py @@ -0,0 +1,256 @@ +# coding=utf-8 +# Copyright 2024 Meta AI and The HuggingFace Inc. team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +""" Musicgen Melody model configuration""" + +from ...configuration_utils import PretrainedConfig +from ...utils import logging +from ..auto.configuration_auto import AutoConfig + + +logger = logging.get_logger(__name__) + +MUSICGEN_MELODY_PRETRAINED_CONFIG_ARCHIVE_MAP = { + "facebook/musicgen-melody": "https://huggingface.co/facebook/musicgen-melody/resolve/main/config.json", +} + + +class MusicgenMelodyDecoderConfig(PretrainedConfig): + r""" + This is the configuration class to store the configuration of an [`MusicgenMelodyDecoder`]. It is used to instantiate a + Musicgen Melody decoder according to the specified arguments, defining the model architecture. Instantiating a + configuration with the defaults will yield a similar configuration to that of the Musicgen Melody + [facebook/musicgen-melody](https://huggingface.co/facebook/musicgen-melody) architecture. + + Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the + documentation from [`PretrainedConfig`] for more information. + + + Args: + vocab_size (`int`, *optional*, defaults to 2048): + Vocabulary size of the MusicgenMelodyDecoder model. Defines the number of different tokens that can be + represented by the `inputs_ids` passed when calling [`MusicgenMelodyDecoder`]. + max_position_embeddings (`int`, *optional*, defaults to 2048): + The maximum sequence length that this model might ever be used with. Typically, set this to something large + just in case (e.g., 512 or 1024 or 2048). + num_hidden_layers (`int`, *optional*, defaults to 24): + Number of decoder layers. + ffn_dim (`int`, *optional*, defaults to 4096): + Dimensionality of the "intermediate" (often named feed-forward) layer in the Transformer block. + num_attention_heads (`int`, *optional*, defaults to 16): + Number of attention heads for each attention layer in the Transformer block. + layerdrop (`float`, *optional*, defaults to 0.0): + The LayerDrop probability for the decoder. See the [LayerDrop paper](see https://arxiv.org/abs/1909.11556) + for more details. + use_cache (`bool`, *optional*, defaults to `True`): + Whether the model should return the last key/values attentions (not used by all models) + activation_function (`str` or `function`, *optional*, defaults to `"gelu"`): + The non-linear activation function (function or string) in the decoder and pooler. If string, `"gelu"`, + `"relu"`, `"silu"` and `"gelu_new"` are supported. + hidden_size (`int`, *optional*, defaults to 1024): + Dimensionality of the layers and the pooler layer. + dropout (`float`, *optional*, defaults to 0.1): + The dropout probability for all fully connected layers in the embeddings, text_encoder, and pooler. 
+ attention_dropout (`float`, *optional*, defaults to 0.0): + The dropout ratio for the attention probabilities. + activation_dropout (`float`, *optional*, defaults to 0.0): + The dropout ratio for activations inside the fully connected layer. + initializer_factor (`float`, *optional*, defaults to 0.02): + The standard deviation of the truncated_normal_initializer for initializing all weight matrices. + scale_embedding (`bool`, *optional*, defaults to `False`): + Scale embeddings by diving by sqrt(hidden_size). + num_codebooks (`int`, *optional*, defaults to 4): + The number of parallel codebooks forwarded to the model. + audio_channels (`int`, *optional*, defaults to 1): + Number of audio channels used by the model (either mono or stereo). Stereo models generate a separate + audio stream for the left/right output channels. Mono models generate a single audio stream output. + pad_token_id (`int`, *optional*, defaults to 2048): The id of the *padding* token. + bos_token_id (`int`, *optional*, defaults to 2048): The id of the *beginning-of-sequence* token. + eos_token_id (`int`, *optional*): The id of the *end-of-sequence* token. + tie_word_embeddings (`bool`, *optional*, defaults to `False`): Whether to tie word embeddings with the text encoder. + """ + + model_type = "musicgen_melody_decoder" + keys_to_ignore_at_inference = ["past_key_values"] + + def __init__( + self, + vocab_size=2048, + max_position_embeddings=2048, + num_hidden_layers=24, + ffn_dim=4096, + num_attention_heads=16, + layerdrop=0.0, + use_cache=True, + activation_function="gelu", + hidden_size=1024, + dropout=0.1, + attention_dropout=0.0, + activation_dropout=0.0, + initializer_factor=0.02, + scale_embedding=False, + num_codebooks=4, + audio_channels=1, + pad_token_id=2048, + bos_token_id=2048, + eos_token_id=None, + tie_word_embeddings=False, + **kwargs, + ): + self.vocab_size = vocab_size + self.max_position_embeddings = max_position_embeddings + self.hidden_size = hidden_size + self.ffn_dim = ffn_dim + self.num_hidden_layers = num_hidden_layers + self.num_attention_heads = num_attention_heads + self.dropout = dropout + self.attention_dropout = attention_dropout + self.activation_dropout = activation_dropout + self.activation_function = activation_function + self.initializer_factor = initializer_factor + self.layerdrop = layerdrop + self.use_cache = use_cache + self.scale_embedding = scale_embedding # scale factor will be sqrt(d_model) if True + self.num_codebooks = num_codebooks + + if audio_channels not in [1, 2]: + raise ValueError(f"Expected 1 (mono) or 2 (stereo) audio channels, got {audio_channels} channels.") + self.audio_channels = audio_channels + + super().__init__( + pad_token_id=pad_token_id, + bos_token_id=bos_token_id, + eos_token_id=eos_token_id, + tie_word_embeddings=tie_word_embeddings, + **kwargs, + ) + + +class MusicgenMelodyConfig(PretrainedConfig): + r""" + This is the configuration class to store the configuration of a [`MusicgenMelodyModel`]. It is used to instantiate a + Musicgen Melody model according to the specified arguments, defining the text encoder, audio encoder and Musicgen Melody decoder + configs. Instantiating a configuration with the defaults will yield a similar configuration to that of the Musicgen Melody + [facebook/musicgen-melody](https://huggingface.co/facebook/musicgen-melody) architecture. + + Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the + documentation from [`PretrainedConfig`] for more information. 
+ + Args: + num_chroma (`int`, *optional*, defaults to 12): Number of chroma bins to use. + chroma_length (`int`, *optional*, defaults to 235): + Maximum chroma duration if audio is used to condition the model. Corresponds to the maximum duration used during training. + kwargs (*optional*): + Dictionary of keyword arguments. Notably: + + - **text_encoder** ([`PretrainedConfig`], *optional*) -- An instance of a configuration object that + defines the text encoder config. + - **audio_encoder** ([`PretrainedConfig`], *optional*) -- An instance of a configuration object that + defines the audio encoder config. + - **decoder** ([`PretrainedConfig`], *optional*) -- An instance of a configuration object that defines + the decoder config. + + Example: + + ```python + >>> from transformers import ( + ... MusicgenMelodyConfig, + ... MusicgenMelodyDecoderConfig, + ... T5Config, + ... EncodecConfig, + ... MusicgenMelodyForConditionalGeneration, + ... ) + + >>> # Initializing text encoder, audio encoder, and decoder model configurations + >>> text_encoder_config = T5Config() + >>> audio_encoder_config = EncodecConfig() + >>> decoder_config = MusicgenMelodyDecoderConfig() + + >>> configuration = MusicgenMelodyConfig.from_sub_models_config( + ... text_encoder_config, audio_encoder_config, decoder_config + ... ) + + >>> # Initializing a MusicgenMelodyForConditionalGeneration (with random weights) from the facebook/musicgen-melody style configuration + >>> model = MusicgenMelodyForConditionalGeneration(configuration) + + >>> # Accessing the model configuration + >>> configuration = model.config + >>> config_text_encoder = model.config.text_encoder + >>> config_audio_encoder = model.config.audio_encoder + >>> config_decoder = model.config.decoder + + >>> # Saving the model, including its configuration + >>> model.save_pretrained("musicgen_melody-model") + + >>> # loading model and config from pretrained folder + >>> musicgen_melody_config = MusicgenMelodyConfig.from_pretrained("musicgen_melody-model") + >>> model = MusicgenMelodyForConditionalGeneration.from_pretrained("musicgen_melody-model", config=musicgen_melody_config) + ```""" + + model_type = "musicgen_melody" + is_composition = True + + def __init__( + self, + num_chroma=12, + chroma_length=235, + **kwargs, + ): + super().__init__(**kwargs) + if "text_encoder" not in kwargs or "audio_encoder" not in kwargs or "decoder" not in kwargs: + raise ValueError("Config has to be initialized with text_encoder, audio_encoder and decoder config") + + text_encoder_config = kwargs.pop("text_encoder") + text_encoder_model_type = text_encoder_config.pop("model_type") + + audio_encoder_config = kwargs.pop("audio_encoder") + audio_encoder_model_type = audio_encoder_config.pop("model_type") + + decoder_config = kwargs.pop("decoder") + + self.text_encoder = AutoConfig.for_model(text_encoder_model_type, **text_encoder_config) + self.audio_encoder = AutoConfig.for_model(audio_encoder_model_type, **audio_encoder_config) + self.decoder = MusicgenMelodyDecoderConfig(**decoder_config) + self.is_encoder_decoder = False + + self.num_chroma = num_chroma + self.chroma_length = chroma_length + + @classmethod + def from_sub_models_config( + cls, + text_encoder_config: PretrainedConfig, + audio_encoder_config: PretrainedConfig, + decoder_config: MusicgenMelodyDecoderConfig, + **kwargs, + ): + r""" + Instantiate a [`MusicgenMelodyConfig`] (or a derived class) from text encoder, audio encoder and decoder + configurations. 
+ + Returns: + [`MusicgenMelodyConfig`]: An instance of a configuration object + """ + + return cls( + text_encoder=text_encoder_config.to_dict(), + audio_encoder=audio_encoder_config.to_dict(), + decoder=decoder_config.to_dict(), + **kwargs, + ) + + @property + # This is a property because you might want to change the codec model on the fly + def sampling_rate(self): + return self.audio_encoder.sampling_rate diff --git a/src/transformers/models/musicgen_melody/convert_musicgen_melody_transformers.py b/src/transformers/models/musicgen_melody/convert_musicgen_melody_transformers.py new file mode 100644 index 00000000000000..9e224d93f1526a --- /dev/null +++ b/src/transformers/models/musicgen_melody/convert_musicgen_melody_transformers.py @@ -0,0 +1,266 @@ +# coding=utf-8 +# Copyright 2024 The HuggingFace Inc. team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +"""Convert Musicgen Melody checkpoints from the original repository.""" +import argparse +from pathlib import Path +from typing import Dict, OrderedDict, Tuple + +import torch +from audiocraft.models import MusicGen + +from transformers import ( + AutoTokenizer, + EncodecModel, + T5EncoderModel, +) +from transformers.models.musicgen_melody.configuration_musicgen_melody import MusicgenMelodyDecoderConfig +from transformers.models.musicgen_melody.feature_extraction_musicgen_melody import MusicgenMelodyFeatureExtractor +from transformers.models.musicgen_melody.modeling_musicgen_melody import ( + MusicgenMelodyForCausalLM, + MusicgenMelodyForConditionalGeneration, +) +from transformers.models.musicgen_melody.processing_musicgen_melody import MusicgenMelodyProcessor +from transformers.utils import logging + + +logging.set_verbosity_info() +logger = logging.get_logger(__name__) + + +EXPECTED_MISSING_KEYS = ["model.decoder.embed_positions.weights"] +EXPECTED_ADDITIONAL_KEYS = ["condition_provider.conditioners.self_wav.chroma.spec.window"] + + +def rename_keys(name): + if "emb" in name: + name = name.replace("emb", "model.decoder.embed_tokens") + if "transformer" in name: + name = name.replace("transformer", "model.decoder") + if "cross_attention" in name: + name = name.replace("cross_attention", "encoder_attn") + if "linear1" in name: + name = name.replace("linear1", "fc1") + if "linear2" in name: + name = name.replace("linear2", "fc2") + if "norm1" in name: + name = name.replace("norm1", "self_attn_layer_norm") + if "norm_cross" in name: + name = name.replace("norm_cross", "encoder_attn_layer_norm") + if "norm2" in name: + name = name.replace("norm2", "final_layer_norm") + if "out_norm" in name: + name = name.replace("out_norm", "model.decoder.layer_norm") + if "linears" in name: + name = name.replace("linears", "lm_heads") + if "condition_provider.conditioners.description.output_proj" in name: + name = name.replace("condition_provider.conditioners.description.output_proj", "enc_to_dec_proj") + if "condition_provider.conditioners.self_wav.output_proj" in name: + name = 
name.replace("condition_provider.conditioners.self_wav.output_proj", "audio_enc_to_dec_proj") + return name + + +def rename_state_dict(state_dict: OrderedDict, hidden_size: int) -> Tuple[Dict, Dict]: + """Function that takes the fairseq MusicgenMelody state dict and renames it according to the HF + module names. It further partitions the state dict into the decoder (LM) state dict, and that for the + text encoder projection and for the audio encoder projection.""" + keys = list(state_dict.keys()) + enc_dec_proj_state_dict = {} + audio_enc_to_dec_proj_state_dict = {} + for key in keys: + val = state_dict.pop(key) + key = rename_keys(key) + if "in_proj_weight" in key: + # split fused qkv proj + state_dict[key.replace("in_proj_weight", "q_proj.weight")] = val[:hidden_size, :] + state_dict[key.replace("in_proj_weight", "k_proj.weight")] = val[hidden_size : 2 * hidden_size, :] + state_dict[key.replace("in_proj_weight", "v_proj.weight")] = val[-hidden_size:, :] + elif "audio_enc_to_dec_proj" in key: + audio_enc_to_dec_proj_state_dict[key[len("audio_enc_to_dec_proj.") :]] = val + elif "enc_to_dec_proj" in key: + enc_dec_proj_state_dict[key[len("enc_to_dec_proj.") :]] = val + else: + state_dict[key] = val + return state_dict, enc_dec_proj_state_dict, audio_enc_to_dec_proj_state_dict + + +def decoder_config_from_checkpoint(checkpoint: str) -> MusicgenMelodyDecoderConfig: + if checkpoint == "facebook/musicgen-melody" or checkpoint == "facebook/musicgen-stereo-melody": + hidden_size = 1536 + num_hidden_layers = 48 + num_attention_heads = 24 + elif checkpoint == "facebook/musicgen-melody-large" or checkpoint == "facebook/musicgen-stereo-melody-large": + hidden_size = 2048 + num_hidden_layers = 48 + num_attention_heads = 32 + else: + raise ValueError( + "Checkpoint should be one of `['facebook/musicgen-melody', 'facebook/musicgen-melody-large']` for the mono checkpoints, " + "or `['facebook/musicgen-stereo-melody', 'facebook/musicgen-stereo-melody-large']` " + f"for the stereo checkpoints, got {checkpoint}." 
+ ) + + if "stereo" in checkpoint: + audio_channels = 2 + num_codebooks = 8 + else: + audio_channels = 1 + num_codebooks = 4 + + config = MusicgenMelodyDecoderConfig( + hidden_size=hidden_size, + ffn_dim=hidden_size * 4, + num_hidden_layers=num_hidden_layers, + num_attention_heads=num_attention_heads, + num_codebooks=num_codebooks, + audio_channels=audio_channels, + ) + return config + + +@torch.no_grad() +def convert_musicgen_melody_checkpoint( + checkpoint, pytorch_dump_folder=None, repo_id=None, device="cpu", test_same_output=False +): + fairseq_model = MusicGen.get_pretrained(checkpoint, device=device) + decoder_config = decoder_config_from_checkpoint(checkpoint) + + decoder_state_dict = fairseq_model.lm.state_dict() + decoder_state_dict, enc_dec_proj_state_dict, audio_enc_to_dec_proj_state_dict = rename_state_dict( + decoder_state_dict, hidden_size=decoder_config.hidden_size + ) + + text_encoder = T5EncoderModel.from_pretrained("t5-base") + audio_encoder = EncodecModel.from_pretrained("facebook/encodec_32khz") + decoder = MusicgenMelodyForCausalLM(decoder_config).eval() + + # load all decoder weights - expect that we'll be missing embeddings and enc-dec projection + missing_keys, unexpected_keys = decoder.load_state_dict(decoder_state_dict, strict=False) + + for key in missing_keys.copy(): + if key.startswith(("text_encoder", "audio_encoder")) or key in EXPECTED_MISSING_KEYS: + missing_keys.remove(key) + + for key in unexpected_keys.copy(): + if key in EXPECTED_ADDITIONAL_KEYS: + unexpected_keys.remove(key) + + if len(missing_keys) > 0: + raise ValueError(f"Missing key(s) in state_dict: {missing_keys}") + + if len(unexpected_keys) > 0: + raise ValueError(f"Unexpected key(s) in state_dict: {unexpected_keys}") + + # init the composite model + model = MusicgenMelodyForConditionalGeneration( + text_encoder=text_encoder, audio_encoder=audio_encoder, decoder=decoder + ).to(device) + + # load the pre-trained enc-dec projection (from the decoder state dict) + model.enc_to_dec_proj.load_state_dict(enc_dec_proj_state_dict) + + # load the pre-trained audio encoder projection (from the decoder state dict) + model.audio_enc_to_dec_proj.load_state_dict(audio_enc_to_dec_proj_state_dict) + + # check we can do a forward pass + input_ids = torch.arange(0, 2 * decoder_config.num_codebooks, dtype=torch.long).reshape(2, -1).to(device) + decoder_input_ids = input_ids.reshape(2 * decoder_config.num_codebooks, -1).to(device) + + with torch.no_grad(): + logits = model(input_ids=input_ids, decoder_input_ids=decoder_input_ids).logits + + output_length = 1 + input_ids.shape[1] + model.config.chroma_length + if logits.shape != (2 * decoder_config.num_codebooks, output_length, 2048): + raise ValueError("Incorrect shape for logits") + + # now construct the processor + tokenizer = AutoTokenizer.from_pretrained("t5-base") + feature_extractor = MusicgenMelodyFeatureExtractor() + + processor = MusicgenMelodyProcessor(feature_extractor=feature_extractor, tokenizer=tokenizer) + + # set the appropriate bos/pad token ids + model.generation_config.decoder_start_token_id = 2048 + model.generation_config.pad_token_id = 2048 + + # set other default generation config params + model.generation_config.max_length = int(30 * audio_encoder.config.frame_rate) + model.generation_config.do_sample = True + model.generation_config.guidance_scale = 3.0 + + if test_same_output: + # check same output as the original model + decoder_input_ids = torch.ones_like(decoder_input_ids).to(device) * model.generation_config.pad_token_id +
with torch.no_grad(): + decoder_input_ids = decoder_input_ids[: decoder_config.num_codebooks] + inputs = processor(text=["gen"], return_tensors="pt", padding=True).to(device) + logits = model(**inputs, decoder_input_ids=decoder_input_ids).logits + + attributes, prompt_tokens = fairseq_model._prepare_tokens_and_attributes(["gen"], None) + original_logits = fairseq_model.lm.forward( + decoder_input_ids.reshape(1, decoder_config.num_codebooks, -1), attributes + ) + + torch.testing.assert_close( + original_logits.squeeze(2).reshape(decoder_config.num_codebooks, -1), + logits[:, -1], + rtol=1e-5, + atol=5e-5, + ) + + if pytorch_dump_folder is not None: + Path(pytorch_dump_folder).mkdir(exist_ok=True) + logger.info(f"Saving model {checkpoint} to {pytorch_dump_folder}") + model.save_pretrained(pytorch_dump_folder) + processor.save_pretrained(pytorch_dump_folder) + + if repo_id: + logger.info(f"Pushing model {checkpoint} to {repo_id}") + model.push_to_hub(repo_id, create_pr=True) + processor.push_to_hub(repo_id, create_pr=True) + + +if __name__ == "__main__": + parser = argparse.ArgumentParser() + # Required parameters + parser.add_argument( + "--checkpoint", + default="facebook/musicgen-melody", + type=str, + help="Checkpoint size of the Musicgen Melody model you'd like to convert. Can be one of: " + "`['facebook/musicgen-melody', 'facebook/musicgen-melody-large']` for the mono checkpoints, or " + "`['facebook/musicgen-stereo-melody', 'facebook/musicgen-stereo-melody-large']` " + "for the stereo checkpoints.", + ) + parser.add_argument( + "--pytorch_dump_folder", + default=None, + type=str, + help="Path to the output PyTorch model directory.", + ) + parser.add_argument( + "--push_to_hub", + default="musicgen-melody", + type=str, + help="Where to upload the converted model on the 🤗 hub.", + ) + parser.add_argument( + "--device", default="cpu", type=str, help="Torch device to run the conversion, either cpu or cuda." + ) + parser.add_argument("--test_same_output", default=False, type=bool, help="If `True`, test if same output logits.") + + args = parser.parse_args() + convert_musicgen_melody_checkpoint( + args.checkpoint, args.pytorch_dump_folder, args.push_to_hub, args.device, args.test_same_output + ) diff --git a/src/transformers/models/musicgen_melody/feature_extraction_musicgen_melody.py b/src/transformers/models/musicgen_melody/feature_extraction_musicgen_melody.py new file mode 100644 index 00000000000000..2013309da50686 --- /dev/null +++ b/src/transformers/models/musicgen_melody/feature_extraction_musicgen_melody.py @@ -0,0 +1,330 @@ +# coding=utf-8 +# Copyright 2024 Meta AI and The HuggingFace Inc. team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+""" +Feature extractor class for Musicgen Melody +""" +import copy +from typing import Any, Dict, List, Optional, Union + +import numpy as np + +from ...audio_utils import chroma_filter_bank +from ...feature_extraction_sequence_utils import SequenceFeatureExtractor +from ...feature_extraction_utils import BatchFeature +from ...utils import TensorType, is_torch_available, is_torchaudio_available, logging + + +if is_torch_available(): + import torch + +if is_torchaudio_available(): + import torchaudio + +logger = logging.get_logger(__name__) + + +class MusicgenMelodyFeatureExtractor(SequenceFeatureExtractor): + r""" + Constructs a MusicgenMelody feature extractor. + + This feature extractor inherits from [`~feature_extraction_sequence_utils.SequenceFeatureExtractor`] which contains + most of the main methods. Users should refer to this superclass for more information regarding those methods. + + This class extracts chroma features from audio processed by [Demucs](https://github.com/adefossez/demucs/tree/main) or + directly from raw audio waveform. + + Args: + feature_size (`int`, *optional*, defaults to 12): + The feature dimension of the extracted features. + sampling_rate (`int`, *optional*, defaults to 32000): + The sampling rate at which the audio files should be digitalized expressed in hertz (Hz). + hop_length (`int`, *optional*, defaults to 4096): + Length of the overlaping windows for the STFT used to obtain the Mel Frequency coefficients. + chunk_length (`int`, *optional*, defaults to 30): + The maximum number of chunks of `sampling_rate` samples used to trim and pad longer or shorter audio + sequences. + n_fft (`int`, *optional*, defaults to 16384): + Size of the Fourier transform. + num_chroma (`int`, *optional*, defaults to 12): + Number of chroma bins to use. + padding_value (`float`, *optional*, defaults to 0.0): + Padding value used to pad the audio. + return_attention_mask (`bool`, *optional*, defaults to `False`): + Whether to return the attention mask. Can be overwritten when calling the feature extractor. + + [What are attention masks?](../glossary#attention-mask) + + + + For Whisper models, `attention_mask` should always be passed for batched inference, to avoid subtle + bugs. + + + stem_indices (`List[int]`, *optional*, defaults to `[3, 2]`): + Stem channels to extract if demucs outputs are passed. 
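A minimal usage sketch with raw audio, using the defaults above (it assumes `torch` and `torchaudio` are installed; the silent waveform is purely illustrative):

```python
>>> import numpy as np

>>> feature_extractor = MusicgenMelodyFeatureExtractor()
>>> audio = np.zeros(32000 * 30)  # 30 seconds of mono silence at 32 kHz
>>> inputs = feature_extractor(audio, sampling_rate=32000, return_tensors="pt")
>>> inputs["input_features"].shape  # one 12-bin chroma vector per STFT frame (hop_length=4096)
torch.Size([1, 235, 12])
```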
+ """ + + model_input_names = ["input_features"] + + def __init__( + self, + feature_size=12, + sampling_rate=32000, + hop_length=4096, + chunk_length=30, + n_fft=16384, + num_chroma=12, + padding_value=0.0, + return_attention_mask=False, # pad inputs to max length with silence token (zero) and no attention mask + stem_indices=[3, 2], + **kwargs, + ): + super().__init__( + feature_size=feature_size, + sampling_rate=sampling_rate, + padding_value=padding_value, + return_attention_mask=return_attention_mask, + **kwargs, + ) + self.n_fft = n_fft + self.hop_length = hop_length + self.chunk_length = chunk_length + self.n_samples = chunk_length * sampling_rate + self.sampling_rate = sampling_rate + self.chroma_filters = torch.from_numpy( + chroma_filter_bank(sampling_rate=sampling_rate, num_frequency_bins=n_fft, tuning=0, num_chroma=num_chroma) + ).float() + self.spectrogram = torchaudio.transforms.Spectrogram( + n_fft=n_fft, win_length=n_fft, hop_length=hop_length, power=2, center=True, pad=0, normalized=True + ) + self.stem_indices = stem_indices + + def _torch_extract_fbank_features(self, waveform: torch.Tensor) -> torch.Tensor: + """ + Compute the chroma spectrogram of the provided audio using the torchaudio spectrogram implementation and the librosa chroma features. + """ + + # if wav length is not long enough, pad it + wav_length = waveform.shape[-1] + if wav_length < self.n_fft: + pad = self.n_fft - wav_length + rest = 0 if pad % 2 == 0 else 1 + waveform = torch.nn.functional.pad(waveform, (pad // 2, pad // 2 + rest), "constant", 0) + + # squeeze alongside channel dimension + spec = self.spectrogram(waveform).squeeze(1) + + # sum along the frequency dimension + raw_chroma = torch.einsum("cf, ...ft->...ct", self.chroma_filters, spec) + + # normalise with max value + norm_chroma = torch.nn.functional.normalize(raw_chroma, p=float("inf"), dim=-2, eps=1e-6) + + # transpose time and chroma dimension -> (batch, time, chroma) + norm_chroma = norm_chroma.transpose(1, 2) + + # replace max value alongside chroma dimension with 1 and replace the rest with 0 + idx = norm_chroma.argmax(-1, keepdim=True) + norm_chroma[:] = 0 + norm_chroma.scatter_(dim=-1, index=idx, value=1) + + return norm_chroma + + def _extract_stem_indices(self, audio, sampling_rate=None): + """ + Extracts stems from the output of the [Demucs](https://github.com/adefossez/demucs/tree/main) audio separation model, + then converts to mono-channel and resample to the feature extractor sampling rate. + + Args: + audio (`torch.Tensor` of shape `(batch_size, num_stems, channel_size, audio_length)`): + The output of the Demucs model to be processed. + sampling_rate (`int`, *optional*): + Demucs sampling rate. If not specified, defaults to `44000`. 
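A shape-level sketch of this helper, using a random tensor in place of real Demucs output (the four-stem layout and the 44.1 kHz rate are assumptions made for the example only):

```python
>>> import torch

>>> feature_extractor = MusicgenMelodyFeatureExtractor()
>>> demucs_like = torch.randn(2, 4, 2, 44100)  # (batch, stems, channels, samples): 1 second of stereo audio
>>> wav = feature_extractor._extract_stem_indices(demucs_like, sampling_rate=44100)
>>> wav.shape  # "vocals" and "other" stems summed, converted to mono, resampled to 32 kHz
torch.Size([2, 32000])
```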
+ """ + sampling_rate = 44000 if sampling_rate is None else sampling_rate + + # extract "vocals" and "others" sources from audio encoder (demucs) output + # [batch_size, num_stems, channel_size, audio_length] + wav = audio[:, torch.tensor(self.stem_indices)] + + # merge extracted stems to single waveform + wav = wav.sum(1) + + # convert to mono-channel waveform + wav = wav.mean(dim=1, keepdim=True) + + # resample to model sampling rate + # not equivalent to julius.resample + if sampling_rate != self.sampling_rate: + wav = torchaudio.functional.resample( + wav, sampling_rate, self.sampling_rate, rolloff=0.945, lowpass_filter_width=24 + ) + + # [batch_size, 1, audio_length] -> [batch_size, audio_length] + wav = wav.squeeze(1) + + return wav + + def __call__( + self, + audio: Union[np.ndarray, List[float], List[np.ndarray], List[List[float]]], + truncation: bool = True, + pad_to_multiple_of: Optional[int] = None, + return_tensors: Optional[Union[str, TensorType]] = None, + return_attention_mask: Optional[bool] = None, + padding: Optional[str] = True, + max_length: Optional[int] = None, + sampling_rate: Optional[int] = None, + **kwargs, + ) -> BatchFeature: + """ + Main method to featurize and prepare for the model one or several sequence(s). + + Args: + audio (`torch.Tensor`, `np.ndarray`, `List[float]`, `List[np.ndarray]`, `List[torch.Tensor]`, `List[List[float]]`): + The sequence or batch of sequences to be padded. Each sequence can be a torch tensor, a numpy array, a list of float + values, a list of numpy arrays, a list of torch tensors, or a list of list of float values. + If `audio` is the output of Demucs, it has to be a torch tensor of shape `(batch_size, num_stems, channel_size, audio_length)`. + Otherwise, it must be mono or stereo channel audio. + truncation (`bool`, *optional*, default to `True`): + Activates truncation to cut input sequences longer than *max_length* to *max_length*. + pad_to_multiple_of (`int`, *optional*, defaults to None): + If set will pad the sequence to a multiple of the provided value. + + This is especially useful to enable the use of Tensor Cores on NVIDIA hardware with compute capability + `>= 7.5` (Volta), or on TPUs which benefit from having sequence lengths be a multiple of 128. + return_tensors (`str` or [`~utils.TensorType`], *optional*): + If set, will return tensors instead of list of python integers. Acceptable values are: + + - `'tf'`: Return TensorFlow `tf.constant` objects. + - `'pt'`: Return PyTorch `torch.Tensor` objects. + - `'np'`: Return Numpy `np.ndarray` objects. + return_attention_mask (`bool`, *optional*): + Whether to return the attention mask. If left to the default, will return the attention mask according + to the specific feature_extractor's default. + + [What are attention masks?](../glossary#attention-mask) + + + For Musicgen Melody models, audio `attention_mask` is not necessary. + + + padding (`bool`, `str` or [`~utils.PaddingStrategy`], *optional*, defaults to `True`): + Select a strategy to pad the returned sequences (according to the model's padding side and padding + index) among: + + - `True` or `'longest'`: Pad to the longest sequence in the batch (or no padding if only a single + sequence if provided). + - `'max_length'`: Pad to a maximum length specified with the argument `max_length` or to the maximum + acceptable input length for the model if that argument is not provided. + - `False` or `'do_not_pad'` (default): No padding (i.e., can output a batch with sequences of different + lengths). 
+ max_length (`int`, *optional*): + Maximum length of the returned list and optionally padding length (see above). + sampling_rate (`int`, *optional*): + The sampling rate at which the `audio` input was sampled. It is strongly recommended to pass + `sampling_rate` at the forward call to prevent silent errors. + Note that if `audio` is the output of Demucs, `sampling_rate` must be the sampling rate at which Demucs operates. + """ + + if sampling_rate is None: + logger.warning_once( + "It is strongly recommended to pass the `sampling_rate` argument to this function. " + "Failing to do so can result in silent errors that might be hard to debug." + ) + + if isinstance(audio, torch.Tensor) and len(audio.shape) == 4: + logger.warning_once( + "`audio` is a 4-dimensional torch tensor and has thus been recognized as the output of `Demucs`. " + "If this is not the case, make sure to read Musicgen Melody docstrings and " + "to correct `audio` to get the right behaviour." + "Link to the docstrings: https://huggingface.co/docs/transformers/main/en/model_doc/musicgen_melody" + ) + audio = self._extract_stem_indices(audio, sampling_rate=sampling_rate) + elif sampling_rate is not None and sampling_rate != self.sampling_rate: + audio = torchaudio.functional.resample( + audio, sampling_rate, self.sampling_rate, rolloff=0.945, lowpass_filter_width=24 + ) + + is_batched = isinstance(audio, (np.ndarray, torch.Tensor)) and len(audio.shape) > 1 + is_batched = is_batched or ( + isinstance(audio, (list, tuple)) and (isinstance(audio[0], (torch.Tensor, np.ndarray, tuple, list))) + ) + + if is_batched and not isinstance(audio[0], torch.Tensor): + audio = [torch.tensor(speech, dtype=torch.float32).unsqueeze(-1) for speech in audio] + elif is_batched: + audio = [speech.unsqueeze(-1) for speech in audio] + elif not is_batched and not isinstance(audio, torch.Tensor): + audio = torch.tensor(audio, dtype=torch.float32).unsqueeze(-1) + + if isinstance(audio[0], torch.Tensor) and audio[0].dtype is torch.float64: + audio = [speech.to(torch.float32) for speech in audio] + + # always return batch + if not is_batched: + audio = [audio] + + if len(audio[0].shape) == 3: + logger.warning_once( + "`audio` has been detected as a batch of stereo signals. Will be convert to mono signals. " + "If this is an undesired behaviour, make sure to read Musicgen Melody docstrings and " + "to correct `audio` to get the right behaviour." + "Link to the docstrings: https://huggingface.co/docs/transformers/main/en/model_doc/musicgen_melody" + ) + # convert to mono-channel waveform + audio = [stereo.mean(dim=0) for stereo in audio] + + batched_speech = BatchFeature({"input_features": audio}) + + padded_inputs = self.pad( + batched_speech, + padding=padding, + max_length=max_length if max_length else self.n_samples, + truncation=truncation, + pad_to_multiple_of=pad_to_multiple_of, + return_attention_mask=return_attention_mask, + return_tensors="pt", + ) + + input_features = self._torch_extract_fbank_features(padded_inputs["input_features"].squeeze(-1)) + + padded_inputs["input_features"] = input_features + + if return_attention_mask: + # rescale from raw audio length to spectrogram length + padded_inputs["attention_mask"] = padded_inputs["attention_mask"][:, :: self.hop_length] + + if return_tensors is not None: + padded_inputs = padded_inputs.convert_to_tensors(return_tensors) + + return padded_inputs + + def to_dict(self) -> Dict[str, Any]: + """ + Serializes this instance to a Python dictionary. 
Returns: + `Dict[str, Any]`: Dictionary of all the attributes that make up this configuration instance. + """ + output = copy.deepcopy(self.__dict__) + output["feature_extractor_type"] = self.__class__.__name__ + if "mel_filters" in output: + del output["mel_filters"] + if "window" in output: + del output["window"] + if "chroma_filters" in output: + del output["chroma_filters"] + if "spectrogram" in output: + del output["spectrogram"] + return output diff --git a/src/transformers/models/musicgen_melody/modeling_musicgen_melody.py b/src/transformers/models/musicgen_melody/modeling_musicgen_melody.py new file mode 100644 index 00000000000000..660e37b4515e6b --- /dev/null +++ b/src/transformers/models/musicgen_melody/modeling_musicgen_melody.py @@ -0,0 +1,2398 @@ +# coding=utf-8 +# Copyright 2024 Meta AI and The HuggingFace Inc. team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +""" PyTorch Musicgen Melody model.""" +import copy +import inspect +import math +import random +from dataclasses import dataclass +from typing import TYPE_CHECKING, Any, Dict, Optional, Tuple, Union + +import torch +import torch.nn as nn +from torch.nn import CrossEntropyLoss + +from ...activations import ACT2FN +from ...generation.configuration_utils import GenerationConfig +from ...generation.logits_process import ClassifierFreeGuidanceLogitsProcessor, LogitsProcessorList +from ...generation.stopping_criteria import StoppingCriteriaList +from ...modeling_attn_mask_utils import _prepare_4d_causal_attention_mask +from ...modeling_outputs import ( + BaseModelOutputWithPast, + ModelOutput, +) +from ...modeling_utils import PreTrainedModel +from ...utils import ( + add_start_docstrings, + add_start_docstrings_to_model_forward, + logging, + replace_return_docstrings, +) +from ..auto.configuration_auto import AutoConfig +from ..auto.modeling_auto import AutoModel, AutoModelForTextEncoding +from .configuration_musicgen_melody import MusicgenMelodyConfig, MusicgenMelodyDecoderConfig + + +if TYPE_CHECKING: + from ...generation.streamers import BaseStreamer + +logger = logging.get_logger(__name__) + +_CONFIG_FOR_DOC = "MusicgenMelodyConfig" +_CHECKPOINT_FOR_DOC = "facebook/musicgen-melody" + +MUSICGEN_MELODY_PRETRAINED_MODEL_ARCHIVE_LIST = [ + "facebook/musicgen-melody", + # See all Musicgen Melody models at https://huggingface.co/models?filter=musicgen_melody +] + + +@dataclass +class MusicgenMelodyOutputWithPast(ModelOutput): + """ + Base class for Musicgen Melody autoregressive outputs. + + Args: + loss (`torch.FloatTensor` of shape `(1,)`, *optional*, returned when `labels` is provided): + Language modeling loss (for next-token prediction). + logits (`torch.FloatTensor` of shape `(batch_size, sequence_length, config.vocab_size)`): + Prediction scores of the language modeling head (scores for each vocabulary token before SoftMax). 
+ past_key_values (`tuple(tuple(torch.FloatTensor))`, *optional*, returned when `use_cache=True` is passed or when `config.use_cache=True`): + Tuple of `tuple(torch.FloatTensor)` of length `config.n_layers`, with each tuple having 2 tensors of shape + `(batch_size, num_heads, sequence_length, embed_size_per_head)`. + + Contains pre-computed hidden-states (key and values in the self-attention blocks) that can be used (see + `past_key_values` input) to speed up sequential decoding. + hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`): + Tuple of `torch.FloatTensor` (one for the output of the embeddings, if the model has an embedding layer, + + one for the output of each layer) of shape `(batch_size, sequence_length, hidden_size)`. + + Hidden-states of the model at the output of each layer plus the optional initial embedding outputs. + attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`): + Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length, + sequence_length)`. + + Attention weights after the attention softmax, used to compute the weighted average in the self-attention + heads. + encoder_hidden_states (`torch.FloatTensor` of shape `(batch_size, encoder_sequence_length, hidden_size)`, *optional*): + Sequence of conditional hidden-states representing the concatenation of the projected text encoder output and the projected audio encoder output. + Used as a conditional signal. + """ + + loss: Optional[torch.FloatTensor] = None + logits: torch.FloatTensor = None + past_key_values: Optional[Tuple[Tuple[torch.FloatTensor]]] = None + hidden_states: Optional[Tuple[torch.FloatTensor]] = None + attentions: Optional[Tuple[torch.FloatTensor]] = None + encoder_hidden_states: Optional[torch.FloatTensor] = None + + +# Copied from transformers.models.encoder_decoder.modeling_encoder_decoder.shift_tokens_right +def shift_tokens_right(input_ids: torch.Tensor, pad_token_id: int, decoder_start_token_id: int): + """ + Shift input ids one token to the right.
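For instance (an illustrative call, using this model's usual `pad_token_id`/`decoder_start_token_id` of 2048):

```python
>>> import torch

>>> labels = torch.tensor([[5, 6, -100]])  # -100 marks ignored label positions
>>> shift_tokens_right(labels, pad_token_id=2048, decoder_start_token_id=2048)
tensor([[2048,    5,    6]])
```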
+ """ + shifted_input_ids = input_ids.new_zeros(input_ids.shape) + shifted_input_ids[:, 1:] = input_ids[:, :-1].clone() + if decoder_start_token_id is None: + raise ValueError("Make sure to set the decoder_start_token_id attribute of the model's configuration.") + shifted_input_ids[:, 0] = decoder_start_token_id + + if pad_token_id is None: + raise ValueError("Make sure to set the pad_token_id attribute of the model's configuration.") + # replace possible -100 values in labels by `pad_token_id` + shifted_input_ids.masked_fill_(shifted_input_ids == -100, pad_token_id) + + return shifted_input_ids + + +# Copied from transformers.models.musicgen.modeling_musicgen.MusicgenSinusoidalPositionalEmbedding with Musicgen->MusicgenMelody +class MusicgenMelodySinusoidalPositionalEmbedding(nn.Module): + """This module produces sinusoidal positional embeddings of any length.""" + + def __init__(self, num_positions: int, embedding_dim: int): + super().__init__() + self.embedding_dim = embedding_dim + self.make_weights(num_positions, embedding_dim) + + def make_weights(self, num_embeddings: int, embedding_dim: int): + emb_weights = self.get_embedding(num_embeddings, embedding_dim) + if hasattr(self, "weights"): + # in forward put the weights on the correct dtype and device of the param + emb_weights = emb_weights.to(dtype=self.weights.dtype, device=self.weights.device) + + self.weights = nn.Parameter(emb_weights) + self.weights.requires_grad = False + self.weights.detach_() + + @staticmethod + def get_embedding(num_embeddings: int, embedding_dim: int): + """ + Build sinusoidal embeddings. This matches the implementation in tensor2tensor, but differs slightly from the + description in Section 3.5 of "Attention Is All You Need". + """ + half_dim = embedding_dim // 2 + emb = math.log(10000) / (half_dim - 1) + emb = torch.exp(torch.arange(half_dim, dtype=torch.int64).float() * -emb) + emb = torch.arange(num_embeddings, dtype=torch.int64).float().unsqueeze(1) * emb.unsqueeze(0) + emb = torch.cat([torch.cos(emb), torch.sin(emb)], dim=1).view(num_embeddings, -1) + if embedding_dim % 2 == 1: + # zero pad + emb = torch.cat([emb, torch.zeros(num_embeddings, 1)], dim=1) + return emb.to(torch.get_default_dtype()) + + @torch.no_grad() + # Ignore copy + def forward(self, inputs_embeds: torch.Tensor, past_key_values_length: int = 0): + bsz, seq_len, _ = inputs_embeds.size() + # Create the position ids from the input token ids. + position_ids = (torch.arange(seq_len) + past_key_values_length).to(inputs_embeds.device) + # expand embeddings if needed + if seq_len > self.weights.size(0): + self.make_weights(seq_len + self.offset, self.embedding_dim) + return self.weights.index_select(0, position_ids.view(-1)).detach() + + +# Copied from transformers.models.bart.modeling_bart.BartAttention with Bart->MusicgenMelody +class MusicgenMelodyAttention(nn.Module): + """Multi-headed attention from 'Attention Is All You Need' paper""" + + def __init__( + self, + embed_dim: int, + num_heads: int, + dropout: float = 0.0, + is_decoder: bool = False, + bias: bool = True, + is_causal: bool = False, + config: Optional[MusicgenMelodyConfig] = None, + ): + super().__init__() + self.embed_dim = embed_dim + self.num_heads = num_heads + self.dropout = dropout + self.head_dim = embed_dim // num_heads + self.config = config + + if (self.head_dim * num_heads) != self.embed_dim: + raise ValueError( + f"embed_dim must be divisible by num_heads (got `embed_dim`: {self.embed_dim}" + f" and `num_heads`: {num_heads})." 
+ ) + self.scaling = self.head_dim**-0.5 + self.is_decoder = is_decoder + self.is_causal = is_causal + + self.k_proj = nn.Linear(embed_dim, embed_dim, bias=bias) + self.v_proj = nn.Linear(embed_dim, embed_dim, bias=bias) + self.q_proj = nn.Linear(embed_dim, embed_dim, bias=bias) + self.out_proj = nn.Linear(embed_dim, embed_dim, bias=bias) + + def _shape(self, tensor: torch.Tensor, seq_len: int, bsz: int): + return tensor.view(bsz, seq_len, self.num_heads, self.head_dim).transpose(1, 2).contiguous() + + def forward( + self, + hidden_states: torch.Tensor, + key_value_states: Optional[torch.Tensor] = None, + past_key_value: Optional[Tuple[torch.Tensor]] = None, + attention_mask: Optional[torch.Tensor] = None, + layer_head_mask: Optional[torch.Tensor] = None, + output_attentions: bool = False, + ) -> Tuple[torch.Tensor, Optional[torch.Tensor], Optional[Tuple[torch.Tensor]]]: + """Input shape: Batch x Time x Channel""" + + # if key_value_states are provided this layer is used as a cross-attention layer + # for the decoder + is_cross_attention = key_value_states is not None + + bsz, tgt_len, _ = hidden_states.size() + + # get query proj + query_states = self.q_proj(hidden_states) * self.scaling + # get key, value proj + # `past_key_value[0].shape[2] == key_value_states.shape[1]` + # is checking that the `sequence_length` of the `past_key_value` is the same as + # the provided `key_value_states` to support prefix tuning + if ( + is_cross_attention + and past_key_value is not None + and past_key_value[0].shape[2] == key_value_states.shape[1] + ): + # reuse k,v, cross_attentions + key_states = past_key_value[0] + value_states = past_key_value[1] + elif is_cross_attention: + # cross_attentions + key_states = self._shape(self.k_proj(key_value_states), -1, bsz) + value_states = self._shape(self.v_proj(key_value_states), -1, bsz) + elif past_key_value is not None: + # reuse k, v, self_attention + key_states = self._shape(self.k_proj(hidden_states), -1, bsz) + value_states = self._shape(self.v_proj(hidden_states), -1, bsz) + key_states = torch.cat([past_key_value[0], key_states], dim=2) + value_states = torch.cat([past_key_value[1], value_states], dim=2) + else: + # self_attention + key_states = self._shape(self.k_proj(hidden_states), -1, bsz) + value_states = self._shape(self.v_proj(hidden_states), -1, bsz) + + if self.is_decoder: + # if cross_attention save Tuple(torch.Tensor, torch.Tensor) of all cross attention key/value_states. + # Further calls to cross_attention layer can then reuse all cross-attention + # key/value_states (first "if" case) + # if uni-directional self-attention (decoder) save Tuple(torch.Tensor, torch.Tensor) of + # all previous decoder key/value_states. 
Further calls to uni-directional self-attention + # can concat previous decoder key/value_states to current projected key/value_states (third "elif" case) + # if encoder bi-directional self-attention `past_key_value` is always `None` + past_key_value = (key_states, value_states) + + proj_shape = (bsz * self.num_heads, -1, self.head_dim) + query_states = self._shape(query_states, tgt_len, bsz).view(*proj_shape) + key_states = key_states.reshape(*proj_shape) + value_states = value_states.reshape(*proj_shape) + + src_len = key_states.size(1) + attn_weights = torch.bmm(query_states, key_states.transpose(1, 2)) + + if attn_weights.size() != (bsz * self.num_heads, tgt_len, src_len): + raise ValueError( + f"Attention weights should be of size {(bsz * self.num_heads, tgt_len, src_len)}, but is" + f" {attn_weights.size()}" + ) + + if attention_mask is not None: + if attention_mask.size() != (bsz, 1, tgt_len, src_len): + raise ValueError( + f"Attention mask should be of size {(bsz, 1, tgt_len, src_len)}, but is {attention_mask.size()}" + ) + attn_weights = attn_weights.view(bsz, self.num_heads, tgt_len, src_len) + attention_mask + attn_weights = attn_weights.view(bsz * self.num_heads, tgt_len, src_len) + + attn_weights = nn.functional.softmax(attn_weights, dim=-1) + + if layer_head_mask is not None: + if layer_head_mask.size() != (self.num_heads,): + raise ValueError( + f"Head mask for a single layer should be of size {(self.num_heads,)}, but is" + f" {layer_head_mask.size()}" + ) + attn_weights = layer_head_mask.view(1, -1, 1, 1) * attn_weights.view(bsz, self.num_heads, tgt_len, src_len) + attn_weights = attn_weights.view(bsz * self.num_heads, tgt_len, src_len) + + if output_attentions: + # this operation is a bit awkward, but it's required to + # make sure that attn_weights keeps its gradient. + # In order to do so, attn_weights have to be reshaped + # twice and have to be reused in the following + attn_weights_reshaped = attn_weights.view(bsz, self.num_heads, tgt_len, src_len) + attn_weights = attn_weights_reshaped.view(bsz * self.num_heads, tgt_len, src_len) + else: + attn_weights_reshaped = None + + attn_probs = nn.functional.dropout(attn_weights, p=self.dropout, training=self.training) + + attn_output = torch.bmm(attn_probs, value_states) + + if attn_output.size() != (bsz * self.num_heads, tgt_len, self.head_dim): + raise ValueError( + f"`attn_output` should be of size {(bsz * self.num_heads, tgt_len, self.head_dim)}, but is" + f" {attn_output.size()}" + ) + + attn_output = attn_output.view(bsz, self.num_heads, tgt_len, self.head_dim) + attn_output = attn_output.transpose(1, 2) + + # Use the `embed_dim` from the config (stored in the class) rather than `hidden_state` because `attn_output` can be + # partitioned across GPUs when using tensor-parallelism. 
+ attn_output = attn_output.reshape(bsz, tgt_len, self.embed_dim) + + attn_output = self.out_proj(attn_output) + + return attn_output, attn_weights_reshaped, past_key_value + + +class MusicgenMelodyDecoderLayer(nn.Module): + def __init__(self, config: MusicgenMelodyDecoderConfig): + super().__init__() + self.embed_dim = config.hidden_size + + self.self_attn = MusicgenMelodyAttention( + embed_dim=self.embed_dim, + num_heads=config.num_attention_heads, + dropout=config.attention_dropout, + is_decoder=True, + bias=False, + ) + self.dropout = config.dropout + self.activation_fn = ACT2FN[config.activation_function] + self.activation_dropout = config.activation_dropout + + self.self_attn_layer_norm = nn.LayerNorm(self.embed_dim) + + self.fc1 = nn.Linear(self.embed_dim, config.ffn_dim, bias=False) + self.fc2 = nn.Linear(config.ffn_dim, self.embed_dim, bias=False) + self.final_layer_norm = nn.LayerNorm(self.embed_dim) + + def forward( + self, + hidden_states: torch.Tensor, + attention_mask: Optional[torch.Tensor] = None, + layer_head_mask: Optional[torch.Tensor] = None, + past_key_value: Optional[Tuple[torch.Tensor]] = None, + output_attentions: Optional[bool] = False, + use_cache: Optional[bool] = True, + ) -> torch.Tensor: + """ + Args: + hidden_states (`torch.FloatTensor`): input to the layer of shape `(batch, seq_len, embed_dim)` + attention_mask (`torch.FloatTensor`): attention mask of size + `(batch, 1, tgt_len, src_len)` where padding elements are indicated by very large negative values. + layer_head_mask (`torch.FloatTensor`): mask for attention heads in a given layer of size `(attention_heads,)`. + past_key_value (`Tuple(torch.FloatTensor)`): cached past key and value projection states + output_attentions (`bool`, *optional*): + Whether or not to return the attentions tensors of all attention layers. See `attentions` under + returned tensors for more detail. 
+ """ + residual = hidden_states + hidden_states = self.self_attn_layer_norm(hidden_states) + + # Self Attention + # decoder uni-directional self-attention cached key/values tuple is at positions 1,2 + self_attn_past_key_value = past_key_value[:2] if past_key_value is not None else None + # add present self-attn cache to positions 1,2 of present_key_value tuple + hidden_states, self_attn_weights, present_key_value = self.self_attn( + hidden_states=hidden_states, + past_key_value=self_attn_past_key_value, + attention_mask=attention_mask, + layer_head_mask=layer_head_mask, + output_attentions=output_attentions, + ) + hidden_states = nn.functional.dropout(hidden_states, p=self.dropout, training=self.training) + hidden_states = residual + hidden_states + + # Fully Connected + residual = hidden_states + hidden_states = self.final_layer_norm(hidden_states) + hidden_states = self.activation_fn(self.fc1(hidden_states)) + hidden_states = nn.functional.dropout(hidden_states, p=self.activation_dropout, training=self.training) + hidden_states = self.fc2(hidden_states) + hidden_states = nn.functional.dropout(hidden_states, p=self.dropout, training=self.training) + hidden_states = residual + hidden_states + + outputs = (hidden_states,) + + if output_attentions: + outputs += (self_attn_weights,) + + if use_cache: + outputs += (present_key_value,) + + return outputs + + +# Copied from transformers.models.musicgen.modeling_musicgen.MusicgenPreTrainedModel with Musicgen->MusicgenMelody +class MusicgenMelodyPreTrainedModel(PreTrainedModel): + """ + An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained + models. + """ + + config_class = MusicgenMelodyDecoderConfig + base_model_prefix = "model" + supports_gradient_checkpointing = True + _no_split_modules = ["MusicgenMelodyDecoderLayer", "MusicgenMelodyAttention"] + + def _init_weights(self, module): + std = self.config.initializer_factor + if isinstance(module, (nn.Linear, nn.Conv1d)): + module.weight.data.normal_(mean=0.0, std=std) + if module.bias is not None: + module.bias.data.zero_() + elif isinstance(module, nn.Embedding): + module.weight.data.normal_(mean=0.0, std=std) + if module.padding_idx is not None: + module.weight.data[module.padding_idx].zero_() + + +MUSICGEN_MELODY_START_DOCSTRING = r""" + + The Musicgen Melody model was proposed in [Simple and Controllable Music Generation](https://arxiv.org/abs/2306.05284) by + Jade Copet, Felix Kreuk, Itai Gat, Tal Remez, David Kant, Gabriel Synnaeve, Yossi Adi, Alexandre Défossez. It is a + decoder-only transformer trained on the task of conditional music generation. + + This model inherits from [`PreTrainedModel`]. Check the superclass documentation for the generic methods the + library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads + etc.) + + This model is also a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass. + Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage + and behavior. + + Parameters: + config ([`MusicgenMelodyConfig`]): Model configuration class with all the parameters of the model. + Initializing with a config file does not load the weights associated with the model, only the + configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights. 
+""" + +MUSICGEN_MELODY_INPUTS_DOCSTRING = r""" + Args: + input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`): + Indices of input sequence tokens in the vocabulary. Padding will be ignored by default should you provide + it. + + Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and + [`PreTrainedTokenizer.__call__`] for details. + + [What are input IDs?](../glossary#input-ids) + attention_mask (`torch.Tensor` of shape `(batch_size, sequence_length)`, *optional*): + Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`: + + - 1 for tokens that are **not masked**, + - 0 for tokens that are **masked**. + + [What are attention masks?](../glossary#attention-mask) + input_features (`torch.FloatTensor` of shape `(batch_size, audio_sequence_length, num_chroma)`): + Input audio features. + This should be returned by the [`MusicgenMelodyFeatureExtractor`] class that you can also + retrieve from [`AutoFeatureExtractor`]. See [`MusicgenMelodyFeatureExtractor.__call__`] for details. + decoder_input_ids (`torch.LongTensor` of shape `(batch_size * num_codebooks, target_sequence_length)`, *optional*): + Indices of decoder input sequence tokens in the vocabulary, corresponding to the sequence of audio codes. + + Indices can be obtained by encoding an audio prompt with an audio encoder model to predict audio codes, + such as with the [`EncodecModel`]. See [`EncodecModel.encode`] for details. + + [What are decoder input IDs?](../glossary#decoder-input-ids) + + + + The `decoder_input_ids` will automatically be converted from shape `(batch_size * num_codebooks, + target_sequence_length)` to `(batch_size, num_codebooks, target_sequence_length)` in the forward pass. If + you obtain audio codes from an audio encoding model, such as [`EncodecModel`], ensure that the number of + frames is equal to 1, and that you reshape the audio codes from `(frames, batch_size, num_codebooks, + target_sequence_length)` to `(batch_size * num_codebooks, target_sequence_length)` prior to passing them as + `decoder_input_ids`. + + + + decoder_attention_mask (`torch.LongTensor` of shape `(batch_size, target_sequence_length)`, *optional*): + Default behavior: generate a tensor that ignores pad tokens in `decoder_input_ids`. Causal mask will also + be used by default. + past_key_values (`tuple(tuple(torch.FloatTensor))`, *optional*, returned when `use_cache=True` is passed or when `config.use_cache=True`): + Tuple of `tuple(torch.FloatTensor)` of length `config.n_layers`, with each tuple having 2 tensors of shape + `(batch_size, num_heads, encoder_sequence_length + sequence_length, embed_size_per_head)`). + + Contains pre-computed hidden-states (key and values in the self-attention blocks) that can be used (see `past_key_values` input) to speed up sequential decoding. + + If `past_key_values` are used, the user can optionally input only the last `decoder_input_ids` (those that + don't have their past key value states given to this model) of shape `(batch_size, 1)` instead of all + `decoder_input_ids` of shape `(batch_size, sequence_length)`. + encoder_hidden_states (`torch.FloatTensor` of shape `(batch_size, encoder_sequence_length, hidden_size)`, *optional*): + Sequence of conditional hidden-states representing the concatenation of the projeted text encoder output and the projeted audio encoder output. + Used as a conditional signal and will thus be concatenated to the projeted `decoder_input_ids`. 
+ inputs_embeds (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*): + Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation. + This is useful if you want more control over how to convert `input_ids` indices into associated vectors + than the model's internal embedding lookup matrix. + decoder_inputs_embeds (`torch.FloatTensor` of shape `(batch_size, target_sequence_length, hidden_size)`, *optional*): + Optionally, instead of passing `decoder_input_ids` you can choose to directly pass an embedded + representation. If `past_key_values` is used, optionally only the last `decoder_inputs_embeds` have to be + input (see `past_key_values`). This is useful if you want more control over how to convert + `decoder_input_ids` indices into associated vectors than the model's internal embedding lookup matrix. + + If `decoder_input_ids` and `decoder_inputs_embeds` are both unset, `decoder_inputs_embeds` takes the value + of `inputs_embeds`. + labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*): + Labels for language modeling. Note that the labels **are shifted** inside the model, i.e. you can set + `labels = input_ids` Indices are selected in `[-100, 0, ..., config.vocab_size]` All labels set to `-100` + are ignored (masked), the loss is only computed for labels in `[0, ..., config.vocab_size]` + use_cache (`bool`, *optional*): + If set to `True`, `past_key_values` key value states are returned and can be used to speed up decoding (see + `past_key_values`). + output_attentions (`bool`, *optional*): + Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned + tensors for more detail. + output_hidden_states (`bool`, *optional*): + Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for + more detail. + return_dict (`bool`, *optional*): + Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple. +""" + +MUSICGEN_MELODY_DECODER_INPUTS_DOCSTRING = r""" + Args: + input_ids (`torch.LongTensor` of shape `(batch_size * num_codebooks, sequence_length)`): + Indices of input sequence tokens in the vocabulary, corresponding to the sequence of audio codes. + + Indices can be obtained by encoding an audio prompt with an audio encoder model to predict audio codes, + such as with the [`EncodecModel`]. See [`EncodecModel.encode`] for details. + + [What are input IDs?](../glossary#input-ids) + + + + The `input_ids` will automatically be converted from shape `(batch_size * num_codebooks, + target_sequence_length)` to `(batch_size, num_codebooks, target_sequence_length)` in the forward pass. If + you obtain audio codes from an audio encoding model, such as [`EncodecModel`], ensure that the number of + frames is equal to 1, and that you reshape the audio codes from `(frames, batch_size, num_codebooks, + target_sequence_length)` to `(batch_size * num_codebooks, target_sequence_length)` prior to passing them as + `input_ids`. + + + + attention_mask (`torch.Tensor` of shape `(batch_size, sequence_length)`, *optional*): + Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`: + + - 1 for tokens that are **not masked**, + - 0 for tokens that are **masked**. 
+
+            [What are attention masks?](../glossary#attention-mask)
+        encoder_hidden_states (`torch.FloatTensor` of shape `(batch_size, encoder_sequence_length, hidden_size)`, *optional*):
+            Sequence of hidden-states representing the concatenation of the text encoder output and the processed audio encoder output.
+            Used as a conditional signal and will thus be concatenated to the projected `decoder_input_ids`.
+        encoder_attention_mask (`torch.LongTensor` of shape `(batch_size, encoder_sequence_length)`, *optional*):
+            Mask to avoid performing attention on conditional hidden states. Mask values
+            selected in `[0, 1]`:
+
+            - 1 for tokens that are **not masked**,
+            - 0 for tokens that are **masked**.
+
+            [What are attention masks?](../glossary#attention-mask)
+        head_mask (`torch.Tensor` of shape `(decoder_layers, decoder_attention_heads)`, *optional*):
+            Mask to nullify selected heads of the attention modules. Mask values selected in `[0, 1]`:
+
+            - 1 indicates the head is **not masked**,
+            - 0 indicates the head is **masked**.
+        past_key_values (`tuple(tuple(torch.FloatTensor))`, *optional*, returned when `use_cache=True` is passed or when `config.use_cache=True`):
+            Tuple of `tuple(torch.FloatTensor)` of length `config.n_layers`, with each tuple having 2 tensors of shape
+            `(batch_size, num_heads, sequence_length, embed_size_per_head)`.
+
+            Contains pre-computed hidden-states (key and values in the self-attention blocks) that can be used (see `past_key_values` input) to speed up sequential decoding.
+
+            If `past_key_values` are used, the user can optionally input only the last `decoder_input_ids` (those that
+            don't have their past key value states given to this model) of shape `(batch_size, 1)` instead of all
+            `decoder_input_ids` of shape `(batch_size, sequence_length)`.
+        inputs_embeds (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*):
+            Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation.
+            This is useful if you want more control over how to convert `input_ids` indices into associated vectors
+            than the model's internal embedding lookup matrix.
+        output_attentions (`bool`, *optional*):
+            Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned
+            tensors for more detail.
+        output_hidden_states (`bool`, *optional*):
+            Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
+            more detail.
+        return_dict (`bool`, *optional*):
+            Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
+"""
+
+
+# Copied from transformers.models.musicgen.modeling_musicgen.MusicgenDecoder with MUSICGEN->MUSICGEN_MELODY,Musicgen->MusicgenMelody
+class MusicgenMelodyDecoder(MusicgenMelodyPreTrainedModel):
+    """
+    Transformer decoder consisting of *config.num_hidden_layers* layers.
Each layer is a [`MusicgenMelodyDecoderLayer`] + """ + + def __init__(self, config: MusicgenMelodyDecoderConfig): + super().__init__(config) + self.dropout = config.dropout + self.layerdrop = config.layerdrop + self.max_target_positions = config.max_position_embeddings + self.d_model = config.hidden_size + self.num_codebooks = config.num_codebooks + self.embed_scale = math.sqrt(config.hidden_size) if config.scale_embedding else 1.0 + + embed_dim = config.vocab_size + 1 + self.embed_tokens = nn.ModuleList( + [nn.Embedding(embed_dim, config.hidden_size) for _ in range(config.num_codebooks)] + ) + + self.embed_positions = MusicgenMelodySinusoidalPositionalEmbedding( + config.max_position_embeddings, + config.hidden_size, + ) + + self.layers = nn.ModuleList([MusicgenMelodyDecoderLayer(config) for _ in range(config.num_hidden_layers)]) + self.layer_norm = nn.LayerNorm(config.hidden_size) + + self.gradient_checkpointing = False + # Initialize weights and apply final processing + self.post_init() + + def get_input_embeddings(self): + return self.embed_tokens + + def set_input_embeddings(self, value): + self.embed_tokens = value + + @add_start_docstrings_to_model_forward(MUSICGEN_MELODY_DECODER_INPUTS_DOCSTRING) + # Ignore copy + def forward( + self, + input_ids: torch.LongTensor = None, + attention_mask: Optional[torch.Tensor] = None, + encoder_hidden_states: Optional[torch.FloatTensor] = None, + encoder_attention_mask: Optional[torch.LongTensor] = None, + head_mask: Optional[torch.Tensor] = None, + past_key_values: Optional[Tuple[Tuple[torch.FloatTensor]]] = None, + inputs_embeds: Optional[torch.FloatTensor] = None, + use_cache: Optional[bool] = None, + output_attentions: Optional[bool] = None, + output_hidden_states: Optional[bool] = None, + return_dict: Optional[bool] = None, + ) -> Union[Tuple, BaseModelOutputWithPast]: + output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions + output_hidden_states = ( + output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states + ) + use_cache = use_cache if use_cache is not None else self.config.use_cache + return_dict = return_dict if return_dict is not None else self.config.use_return_dict + + # retrieve input_ids and inputs_embeds + if input_ids is not None and inputs_embeds is not None: + raise ValueError("You cannot specify both decoder_input_ids and decoder_inputs_embeds at the same time") + elif input_ids is not None: + # (bsz * codebooks, seq_len) -> (bsz, codebooks, seq_len) + input = input_ids.reshape(-1, self.num_codebooks, input_ids.shape[-1]) + bsz, num_codebooks, seq_len = input.shape + input_shape = (bsz, seq_len) + elif inputs_embeds is not None: + input_shape = inputs_embeds.size()[:-1] + input = inputs_embeds[:, :, -1:] + else: + raise ValueError("You have to specify either decoder_input_ids or decoder_inputs_embeds") + + # past_key_values_length + past_key_values_length = past_key_values[0][0].shape[2] if past_key_values is not None else 0 + + if inputs_embeds is None: + inputs_embeds = sum([self.embed_tokens[codebook](input[:, codebook]) for codebook in range(num_codebooks)]) + + if encoder_hidden_states is not None: + # take care of attention masks + if encoder_attention_mask is not None and attention_mask is None: + attention_mask = torch.ones(inputs_embeds.shape[:2], device=inputs_embeds.device) + + if attention_mask is not None: + if encoder_attention_mask is None: + encoder_attention_mask = torch.ones(encoder_hidden_states.shape[:2], 
device=attention_mask.device) + attention_mask = torch.cat([encoder_attention_mask, attention_mask], dim=1) + + # fuse encoder_hidden_states and inputs_embeds + inputs_embeds = torch.cat([encoder_hidden_states, inputs_embeds], dim=1) + + input_shape = inputs_embeds.size()[:-1] + + attention_mask = _prepare_4d_causal_attention_mask( + attention_mask, input_shape, inputs_embeds, past_key_values_length + ) + + # embed positions + positions = self.embed_positions(inputs_embeds, past_key_values_length) + + hidden_states = inputs_embeds + positions.to(inputs_embeds.device) + + hidden_states = nn.functional.dropout(hidden_states, p=self.dropout, training=self.training) + + if self.gradient_checkpointing and self.training: + if use_cache: + logger.warning_once( + "`use_cache=True` is incompatible with gradient checkpointing`. Setting `use_cache=False`..." + ) + use_cache = False + + # decoder layers + all_hidden_states = () if output_hidden_states else None + all_attentions = () if output_attentions else None + next_decoder_cache = () if use_cache else None + + # check if head_mask has a correct number of layers specified if desired + if head_mask is not None: + if head_mask.size()[0] != len(self.layers): + raise ValueError( + f"The `head_mask` should be specified for {len(self.layers)} layers, but it is for" + f" {head_mask.size()[0]}." + ) + + for idx, decoder_layer in enumerate(self.layers): + # add LayerDrop (see https://arxiv.org/abs/1909.11556 for description) + if output_hidden_states: + all_hidden_states += (hidden_states,) + dropout_probability = random.uniform(0, 1) + if self.training and (dropout_probability < self.layerdrop): + continue + + past_key_value = past_key_values[idx] if past_key_values is not None else None + + if self.gradient_checkpointing and self.training: + layer_outputs = self._gradient_checkpointing_func( + decoder_layer.forward, + hidden_states, + attention_mask, + head_mask[idx] if head_mask is not None else None, + None, + output_attentions, + use_cache, + ) + else: + layer_outputs = decoder_layer( + hidden_states, + attention_mask=attention_mask, + layer_head_mask=(head_mask[idx] if head_mask is not None else None), + past_key_value=past_key_value, + output_attentions=output_attentions, + use_cache=use_cache, + ) + hidden_states = layer_outputs[0] + + if use_cache: + next_decoder_cache += (layer_outputs[2 if output_attentions else 1],) + + if output_attentions: + all_attentions += (layer_outputs[1],) + + hidden_states = self.layer_norm(hidden_states) + + # add hidden states from the last decoder layer + if output_hidden_states: + all_hidden_states += (hidden_states,) + + next_cache = next_decoder_cache if use_cache else None + if not return_dict: + return tuple(v for v in [hidden_states, next_cache, all_hidden_states, all_attentions] if v is not None) + return BaseModelOutputWithPast( + last_hidden_state=hidden_states, + past_key_values=next_cache, + hidden_states=all_hidden_states, + attentions=all_attentions, + ) + + +@add_start_docstrings( + "The bare MusicgenMelody decoder model outputting raw hidden-states without any specific head on top.", + MUSICGEN_MELODY_START_DOCSTRING, +) +# Copied from transformers.models.musicgen.modeling_musicgen.MusicgenModel with MUSICGEN->MUSICGEN_MELODY,Musicgen->MusicgenMelody +class MusicgenMelodyModel(MusicgenMelodyPreTrainedModel): + def __init__(self, config: MusicgenMelodyDecoderConfig): + super().__init__(config) + self.decoder = MusicgenMelodyDecoder(config) + # Initialize weights and apply final processing + 
self.post_init() + + def get_input_embeddings(self): + return self.decoder.embed_tokens + + def set_input_embeddings(self, value): + self.decoder.embed_tokens = value + + def get_decoder(self): + return self.decoder + + @add_start_docstrings_to_model_forward(MUSICGEN_MELODY_DECODER_INPUTS_DOCSTRING) + # Ignore copy + def forward( + self, + input_ids: torch.LongTensor = None, + attention_mask: Optional[torch.Tensor] = None, + encoder_hidden_states: Optional[torch.FloatTensor] = None, + encoder_attention_mask: Optional[torch.LongTensor] = None, + head_mask: Optional[torch.Tensor] = None, + past_key_values: Optional[Tuple[Tuple[torch.FloatTensor]]] = None, + inputs_embeds: Optional[torch.FloatTensor] = None, + use_cache: Optional[bool] = None, + output_attentions: Optional[bool] = None, + output_hidden_states: Optional[bool] = None, + return_dict: Optional[bool] = None, + ) -> Union[Tuple, BaseModelOutputWithPast]: + output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions + output_hidden_states = ( + output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states + ) + use_cache = use_cache if use_cache is not None else self.config.use_cache + return_dict = return_dict if return_dict is not None else self.config.use_return_dict + + # decoder outputs consists of (dec_features, past_key_value, dec_hidden, dec_attn) + decoder_outputs = self.decoder( + input_ids=input_ids, + attention_mask=attention_mask, + encoder_hidden_states=encoder_hidden_states, + encoder_attention_mask=encoder_attention_mask, + head_mask=head_mask, + past_key_values=past_key_values, + inputs_embeds=inputs_embeds, + use_cache=use_cache, + output_attentions=output_attentions, + output_hidden_states=output_hidden_states, + return_dict=return_dict, + ) + + if not return_dict: + return decoder_outputs + + return BaseModelOutputWithPast( + last_hidden_state=decoder_outputs.last_hidden_state, + past_key_values=decoder_outputs.past_key_values, + hidden_states=decoder_outputs.hidden_states, + attentions=decoder_outputs.attentions, + ) + + +@add_start_docstrings( + "The Musicgen Melody decoder model with a language modelling head on top.", + MUSICGEN_MELODY_START_DOCSTRING, +) +# Copied from transformers.models.musicgen.modeling_musicgen.MusicgenForCausalLM with MUSICGEN->MUSICGEN_MELODY,Musicgen->MusicgenMelody,MusicGen->Musicgen Melody +class MusicgenMelodyForCausalLM(MusicgenMelodyPreTrainedModel): + def __init__(self, config: MusicgenMelodyDecoderConfig): + super().__init__(config) + + self.model = MusicgenMelodyModel(config) + + self.num_codebooks = config.num_codebooks + self.lm_heads = nn.ModuleList( + [nn.Linear(config.hidden_size, config.vocab_size, bias=False) for _ in range(config.num_codebooks)] + ) + + # Initialize weights and apply final processing + self.post_init() + + def get_input_embeddings(self): + return self.model.decoder.embed_tokens + + def set_input_embeddings(self, value): + self.model.decoder.embed_tokens = value + + def get_output_embeddings(self): + return self.lm_heads + + def set_output_embeddings(self, new_embeddings): + self.lm_heads = new_embeddings + + def set_decoder(self, decoder): + self.model.decoder = decoder + + def get_decoder(self): + return self.model.decoder + + @add_start_docstrings_to_model_forward(MUSICGEN_MELODY_DECODER_INPUTS_DOCSTRING) + @replace_return_docstrings(output_type=MusicgenMelodyOutputWithPast, config_class=_CONFIG_FOR_DOC) + # Ignore copy + def forward( + self, + input_ids: torch.LongTensor 
= None,
+        attention_mask: Optional[torch.Tensor] = None,
+        encoder_hidden_states: Optional[torch.FloatTensor] = None,
+        encoder_attention_mask: Optional[torch.LongTensor] = None,
+        head_mask: Optional[torch.Tensor] = None,
+        past_key_values: Optional[Tuple[Tuple[torch.FloatTensor]]] = None,
+        inputs_embeds: Optional[torch.FloatTensor] = None,
+        use_cache: Optional[bool] = None,
+        output_attentions: Optional[bool] = None,
+        output_hidden_states: Optional[bool] = None,
+        return_dict: Optional[bool] = None,
+        labels: Optional[torch.LongTensor] = None,
+    ) -> Union[Tuple, MusicgenMelodyOutputWithPast]:
+        r"""
+        labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
+            Labels for language modeling. Note that the labels **are shifted** inside the model, i.e. you can set
+            `labels = input_ids` Indices are selected in `[-100, 0, ..., config.vocab_size]` All labels set to `-100`
+            are ignored (masked), the loss is only computed for labels in `[0, ..., config.vocab_size]`
+        Returns:
+        """
+
+        return_dict = return_dict if return_dict is not None else self.config.use_return_dict
+
+        outputs = self.model(
+            input_ids,
+            attention_mask=attention_mask,
+            encoder_hidden_states=encoder_hidden_states,
+            encoder_attention_mask=encoder_attention_mask,
+            head_mask=head_mask,
+            past_key_values=past_key_values,
+            inputs_embeds=inputs_embeds,
+            use_cache=use_cache,
+            output_attentions=output_attentions,
+            output_hidden_states=output_hidden_states,
+            return_dict=return_dict,
+        )
+
+        hidden_states = outputs[0]
+
+        lm_logits = torch.stack([head(hidden_states) for head in self.lm_heads], dim=1)
+
+        loss = None
+        if labels is not None:
+            raise NotImplementedError("Training is not implemented for MusicgenMelody.")
+
+        # (bsz, num_codebooks, seq_len, vocab_size) -> (bsz * num_codebooks, seq_len, vocab_size)
+        lm_logits = lm_logits.reshape(-1, *lm_logits.shape[2:])
+
+        if not return_dict:
+            output = (lm_logits,) + outputs[1:]
+            return ((loss,) + output) if loss is not None else output
+
+        return MusicgenMelodyOutputWithPast(
+            loss=loss,
+            logits=lm_logits,
+            past_key_values=outputs.past_key_values,
+            hidden_states=outputs.hidden_states,
+            attentions=outputs.attentions,
+        )
+
+    # Ignore copy
+    def prepare_inputs_for_generation(
+        self,
+        input_ids,
+        attention_mask=None,
+        encoder_hidden_states=None,
+        encoder_attention_mask=None,
+        head_mask=None,
+        past_key_values=None,
+        use_cache=True,
+        delay_pattern_mask=None,
+        guidance_scale=None,
+        **kwargs,
+    ):
+        if delay_pattern_mask is None:
+            input_ids, delay_pattern_mask = self.build_delay_pattern_mask(
+                input_ids,
+                pad_token_id=self.generation_config.pad_token_id,
+                max_length=self.generation_config.max_length,
+            )
+
+        # apply the delay pattern mask
+        input_ids = self.apply_delay_pattern_mask(input_ids, delay_pattern_mask)
+
+        if guidance_scale is not None and guidance_scale > 1:
+            # for classifier free guidance we need to replicate the decoder args across the batch dim (we'll split these
+            # before sampling)
+            input_ids = input_ids.repeat((2, 1))
+            if attention_mask is not None:
+                attention_mask = attention_mask.repeat((2, 1))
+
+            if encoder_hidden_states is not None:
+                encoder_hidden_states = torch.concatenate(
+                    [encoder_hidden_states, torch.zeros_like(encoder_hidden_states)], dim=0
+                )
+
+            if encoder_attention_mask is not None:
+                encoder_attention_mask = torch.concatenate(
+                    [encoder_attention_mask, torch.zeros_like(encoder_attention_mask)], dim=0
+                )
+
+        if past_key_values is not None:
+            input_ids = input_ids[:, -1:]
+
+            # we only want to 
use conditional signal in the 1st generation step but keeping the attention mask + encoder_hidden_states = None + + return { + "input_ids": input_ids, + "attention_mask": attention_mask, + "encoder_hidden_states": encoder_hidden_states, + "encoder_attention_mask": encoder_attention_mask, + "head_mask": head_mask, + "past_key_values": past_key_values, + "use_cache": use_cache, + } + + def build_delay_pattern_mask(self, input_ids: torch.LongTensor, pad_token_id: int, max_length: int = None): + """Build a delayed pattern mask to the input_ids. Each codebook is offset by the previous codebook by + one, giving a delayed pattern mask at the start of sequence and end of sequence. Take the example where there + are 4 codebooks and a max sequence length of 8, we have the delayed pattern mask of shape `(codebooks, + seq_len)`: + - [P, -1, -1, -1, -1, P, P, P] + - [P, P, -1, -1, -1, -1, P, P] + - [P, P, P, -1, -1, -1, -1, P] + - [P, P, P, P, -1, -1, -1, -1] + where P is the special padding token id and -1 indicates that the token is valid for prediction. If we include + a prompt (decoder input ids), the -1 positions indicate where new tokens should be predicted. Otherwise, the + mask is set to the value in the prompt: + - [P, a, b, -1, -1, P, P, P] + - [P, P, c, d, -1, -1, P, P] + - [P, P, P, e, f, -1, -1, P] + - [P, P, P, P, g, h, -1, -1] + where a-h indicate the input prompt (decoder input ids) that are offset by 1. Now, we only override the -1 + tokens in our prediction. + """ + # (bsz * num_codebooks, seq_len) -> (bsz, num_codebooks, seq_len) + input_ids = input_ids.reshape(-1, self.num_codebooks, input_ids.shape[-1]) + bsz, num_codebooks, seq_len = input_ids.shape + + max_length = max_length if max_length is not None else self.generation_config.max_length + input_ids_shifted = ( + torch.ones((bsz, num_codebooks, max_length), dtype=torch.long, device=input_ids.device) * -1 + ) + + channel_codebooks = num_codebooks // 2 if self.config.audio_channels == 2 else num_codebooks + # we only apply the mask if we have a large enough seq len - otherwise we return as is + if max_length < 2 * channel_codebooks - 1: + return input_ids.reshape(bsz * num_codebooks, -1), input_ids_shifted.reshape(bsz * num_codebooks, -1) + + # fill the shifted ids with the prompt entries, offset by the codebook idx + for codebook in range(channel_codebooks): + if self.config.audio_channels == 1: + # mono channel - loop over the codebooks one-by-one + input_ids_shifted[:, codebook, codebook : seq_len + codebook] = input_ids[:, codebook] + else: + # left/right channels are interleaved in the generated codebooks, so handle one then the other + input_ids_shifted[:, 2 * codebook, codebook : seq_len + codebook] = input_ids[:, 2 * codebook] + input_ids_shifted[:, 2 * codebook + 1, codebook : seq_len + codebook] = input_ids[:, 2 * codebook + 1] + + # construct a pattern mask that indicates the positions of padding tokens for each codebook + # first fill the upper triangular part (the EOS padding) + delay_pattern = torch.triu( + torch.ones((channel_codebooks, max_length), dtype=torch.bool), diagonal=max_length - channel_codebooks + 1 + ) + # then fill the lower triangular part (the BOS padding) + delay_pattern = delay_pattern + torch.tril(torch.ones((channel_codebooks, max_length), dtype=torch.bool)) + + if self.config.audio_channels == 2: + # for left/right channel we need to duplicate every row of the pattern mask in an interleaved fashion + delay_pattern = delay_pattern.repeat_interleave(2, dim=0) + + mask = 
~delay_pattern.to(input_ids.device) + input_ids = mask * input_ids_shifted + ~mask * pad_token_id + + # find the first position to start generating - this is the first place we have the -1 token + # and will always be in the first codebook (since it has no codebook offset) + first_codebook_ids = input_ids[:, 0, :] + start_ids = (first_codebook_ids == -1).nonzero()[:, 1] + if len(start_ids) > 0: + first_start_id = min(start_ids) + else: + # we have no tokens that need to be filled - return entire matrix of input ids + first_start_id = seq_len + + # (bsz * num_codebooks, seq_len) -> (bsz, num_codebooks, seq_len) + pattern_mask = input_ids.reshape(bsz * num_codebooks, -1) + input_ids = input_ids[..., :first_start_id].reshape(bsz * num_codebooks, -1) + return input_ids, pattern_mask + + @staticmethod + def apply_delay_pattern_mask(input_ids, decoder_pad_token_mask): + """Apply a delay pattern mask to the decoder input ids, only preserving predictions where + the mask is set to -1, and otherwise setting to the value detailed in the mask.""" + seq_len = input_ids.shape[-1] + decoder_pad_token_mask = decoder_pad_token_mask[..., :seq_len] + input_ids = torch.where(decoder_pad_token_mask == -1, input_ids, decoder_pad_token_mask) + return input_ids + + @torch.no_grad() + def generate( + self, + inputs: Optional[torch.Tensor] = None, + generation_config: Optional[GenerationConfig] = None, + logits_processor: Optional[LogitsProcessorList] = None, + stopping_criteria: Optional[StoppingCriteriaList] = None, + synced_gpus: Optional[bool] = None, + streamer: Optional["BaseStreamer"] = None, + **kwargs, + ): + """ + + Generates sequences of token ids for models with a language modeling head. + + + + Most generation-controlling parameters are set in `generation_config` which, if not passed, will be set to the + model's default generation configuration. You can override any `generation_config` by passing the corresponding + parameters to generate(), e.g. `.generate(inputs, num_beams=4, do_sample=True)`. + + For an overview of generation strategies and code examples, check out the [following + guide](./generation_strategies). + + + + Parameters: + inputs (`torch.Tensor` of varying shape depending on the modality, *optional*): + The sequence used as a prompt for the generation or as model inputs to the encoder. If `None` the + method initializes it with `bos_token_id` and a batch size of 1. For decoder-only models `inputs` + should be in the format `input_ids`. For encoder-decoder models *inputs* can represent any of + `input_ids`, `input_values`, `input_features`, or `pixel_values`. + generation_config (`~generation.GenerationConfig`, *optional*): + The generation configuration to be used as base parametrization for the generation call. `**kwargs` + passed to generate matching the attributes of `generation_config` will override them. If + `generation_config` is not provided, the default will be used, which had the following loading + priority: 1) from the `generation_config.json` model file, if it exists; 2) from the model + configuration. Please note that unspecified parameters will inherit [`~generation.GenerationConfig`]'s + default values, whose documentation should be checked to parameterize generation. + logits_processor (`LogitsProcessorList`, *optional*): + Custom logits processors that complement the default logits processors built from arguments and + generation config. If a logit processor is passed that is already created with the arguments or a + generation config an error is thrown. 
This feature is intended for advanced users. + stopping_criteria (`StoppingCriteriaList`, *optional*): + Custom stopping criteria that complement the default stopping criteria built from arguments and a + generation config. If a stopping criteria is passed that is already created with the arguments or a + generation config an error is thrown. This feature is intended for advanced users. + synced_gpus (`bool`, *optional*, defaults to `False`): + Whether to continue running the while loop until max_length (needed for ZeRO stage 3) + streamer (`BaseStreamer`, *optional*): + Streamer object that will be used to stream the generated sequences. Generated tokens are passed + through `streamer.put(token_ids)` and the streamer is responsible for any further processing. + kwargs (`Dict[str, Any]`, *optional*): + Ad hoc parametrization of `generate_config` and/or additional model-specific kwargs that will be + forwarded to the `forward` function of the model. If the model is an encoder-decoder model, encoder + specific kwargs should not be prefixed and decoder specific kwargs should be prefixed with *decoder_*. + + Return: + [`~utils.ModelOutput`] or `torch.LongTensor`: A [`~utils.ModelOutput`] (if `return_dict_in_generate=True` + or when `config.return_dict_in_generate=True`) or a `torch.FloatTensor`. + + If the model is *not* an encoder-decoder model (`model.config.is_encoder_decoder=False`), the possible + [`~utils.ModelOutput`] types are: + + - [`~generation.GenerateDecoderOnlyOutput`], + - [`~generation.GenerateBeamDecoderOnlyOutput`] + + If the model is an encoder-decoder model (`model.config.is_encoder_decoder=True`), the possible + [`~utils.ModelOutput`] types are: + + - [`~generation.GenerateEncoderDecoderOutput`], + - [`~generation.GenerateBeamEncoderDecoderOutput`] + """ + # 1. Handle `generation_config` and kwargs that might update it, and validate the resulting objects + if generation_config is None: + generation_config = self.generation_config + + generation_config = copy.deepcopy(generation_config) + model_kwargs = generation_config.update(**kwargs) # All unused kwargs must be model kwargs + generation_config.validate() + self._validate_model_kwargs(model_kwargs.copy()) + + # 2. Set generation parameters if not already defined + logits_processor = logits_processor if logits_processor is not None else LogitsProcessorList() + stopping_criteria = stopping_criteria if stopping_criteria is not None else StoppingCriteriaList() + + if generation_config.pad_token_id is None and generation_config.eos_token_id is not None: + if model_kwargs.get("attention_mask", None) is None: + logger.warning( + "The attention mask and the pad token id were not set. As a consequence, you may observe " + "unexpected behavior. Please pass your input's `attention_mask` to obtain reliable results." + ) + eos_token_id = generation_config.eos_token_id + if isinstance(eos_token_id, list): + eos_token_id = eos_token_id[0] + logger.warning(f"Setting `pad_token_id` to `eos_token_id`:{eos_token_id} for open-end generation.") + generation_config.pad_token_id = eos_token_id + + # 3. Define model inputs + # inputs_tensor has to be defined + # model_input_name is defined if model-specific keyword input is passed + # otherwise model_input_name is None + # all model-specific keyword inputs are removed from `model_kwargs` + input_ids, model_input_name, model_kwargs = self._prepare_model_inputs( + inputs, generation_config.bos_token_id, model_kwargs + ) + batch_size = input_ids.shape[0] // self.num_codebooks + + # 4. 
Define other model kwargs + model_kwargs["output_attentions"] = generation_config.output_attentions + model_kwargs["output_hidden_states"] = generation_config.output_hidden_states + model_kwargs["use_cache"] = generation_config.use_cache + model_kwargs["guidance_scale"] = generation_config.guidance_scale + + # Ignore copy + if model_kwargs.get("attention_mask", None) is None: + model_kwargs["attention_mask"] = self._prepare_attention_mask_for_generation( + input_ids, generation_config.pad_token_id, generation_config.eos_token_id + ) + + # 5. Prepare `max_length` depending on other stopping criteria. + input_ids_seq_length = input_ids.shape[-1] + has_default_max_length = kwargs.get("max_length") is None and generation_config.max_length is not None + if has_default_max_length and generation_config.max_new_tokens is None and generation_config.max_length == 20: + logger.warning( + f"Using the model-agnostic default `max_length` (={generation_config.max_length}) " + "to control the generation length. recommend setting `max_new_tokens` to control the maximum length of the generation." + ) + elif generation_config.max_new_tokens is not None: + if not has_default_max_length: + logger.warning( + f"Both `max_new_tokens` (={generation_config.max_new_tokens}) and `max_length`(=" + f"{generation_config.max_length}) seem to have been set. `max_new_tokens` will take precedence. " + "Please refer to the documentation for more information. " + "(https://huggingface.co/docs/transformers/main/en/main_classes/text_generation)" + ) + generation_config.max_length = generation_config.max_new_tokens + input_ids_seq_length + + if generation_config.min_length is not None and generation_config.min_length > generation_config.max_length: + raise ValueError( + f"Unfeasible length constraints: the minimum length ({generation_config.min_length}) is larger than" + f" the maximum length ({generation_config.max_length})" + ) + if input_ids_seq_length >= generation_config.max_length: + logger.warning( + f"Input length of decoder_input_ids is {input_ids_seq_length}, but `max_length` is set to" + f" {generation_config.max_length}. This can lead to unexpected behavior. You should consider" + " increasing `max_new_tokens`." + ) + + # 6. Prepare `input_ids` which will be used for auto-regressive generation + # Build the delay pattern mask for offsetting each codebook prediction by 1 (this behaviour is specific to MusicGen) + input_ids, delay_pattern_mask = self.build_delay_pattern_mask( + input_ids, + pad_token_id=generation_config.decoder_start_token_id, + max_length=generation_config.max_length, + ) + + if streamer is not None: + streamer.put(input_ids.cpu()) + + # stash the delay mask so that we don't have to recompute it in each forward pass + model_kwargs["delay_pattern_mask"] = delay_pattern_mask + + # 7. determine generation mode + is_greedy_gen_mode = ( + (generation_config.num_beams == 1) + and (generation_config.num_beam_groups == 1) + and generation_config.do_sample is False + ) + is_sample_gen_mode = ( + (generation_config.num_beams == 1) + and (generation_config.num_beam_groups == 1) + and generation_config.do_sample is True + ) + + # 8. prepare batched CFG externally (to enable coexistance with the unbatched CFG) + if generation_config.guidance_scale is not None and generation_config.guidance_scale > 1: + logits_processor.append(ClassifierFreeGuidanceLogitsProcessor(generation_config.guidance_scale)) + generation_config.guidance_scale = None + + # 9. 
prepare distribution pre_processing samplers + logits_processor = self._get_logits_processor( + generation_config=generation_config, + input_ids_seq_length=input_ids_seq_length, + encoder_input_ids=input_ids, + prefix_allowed_tokens_fn=None, + logits_processor=logits_processor, + ) + + # 10. prepare stopping criteria + stopping_criteria = self._get_stopping_criteria( + generation_config=generation_config, stopping_criteria=stopping_criteria + ) + + if is_greedy_gen_mode: + if generation_config.num_return_sequences > 1: + raise ValueError( + "num_return_sequences has to be 1 when doing greedy search, " + f"but is {generation_config.num_return_sequences}." + ) + + # 11. run greedy search + outputs = self.greedy_search( + input_ids, + logits_processor=logits_processor, + stopping_criteria=stopping_criteria, + pad_token_id=generation_config.pad_token_id, + eos_token_id=generation_config.eos_token_id, + output_scores=generation_config.output_scores, + return_dict_in_generate=generation_config.return_dict_in_generate, + synced_gpus=synced_gpus, + streamer=streamer, + **model_kwargs, + ) + + elif is_sample_gen_mode: + # 11. prepare logits warper + logits_warper = self._get_logits_warper(generation_config) + + # expand input_ids with `num_return_sequences` additional sequences per batch + input_ids, model_kwargs = self._expand_inputs_for_generation( + input_ids=input_ids, + expand_size=generation_config.num_return_sequences, + **model_kwargs, + ) + + # 12. run sample + outputs = self.sample( + input_ids, + logits_processor=logits_processor, + logits_warper=logits_warper, + stopping_criteria=stopping_criteria, + pad_token_id=generation_config.pad_token_id, + eos_token_id=generation_config.eos_token_id, + output_scores=generation_config.output_scores, + return_dict_in_generate=generation_config.return_dict_in_generate, + synced_gpus=synced_gpus, + streamer=streamer, + **model_kwargs, + ) + + else: + raise ValueError( + "Got incompatible mode for generation, should be one of greedy or sampling. " + "Ensure that beam search is de-activated by setting `num_beams=1` and `num_beam_groups=1`." + ) + + if generation_config.return_dict_in_generate: + output_ids = outputs.sequences + else: + output_ids = outputs + + # apply the pattern mask to the final ids + output_ids = self.apply_delay_pattern_mask(output_ids, model_kwargs["delay_pattern_mask"]) + + # revert the pattern delay mask by filtering the pad token id + output_ids = output_ids[output_ids != generation_config.pad_token_id].reshape( + batch_size, self.num_codebooks, -1 + ) + + if generation_config.return_dict_in_generate: + outputs.sequences = output_ids + return outputs + else: + return output_ids + + +@add_start_docstrings( + "The composite Musicgen Melody model with a text and audio conditional models, a MusicgenMelody decoder and an audio encoder, " + "for music generation tasks with one or both of text and audio prompts.", + MUSICGEN_MELODY_START_DOCSTRING, + """ + text_encoder (`Optional[PreTrainedModel]`, *optional*): Text encoder. + audio_encoder (`Optional[PreTrainedModel]`, *optional*): Audio code decoder. + decoder (`Optional[MusicgenMelodyForCausalLM]`, *optional*): MusicGen Melody decoder used to generate audio codes. 
+ """, +) +class MusicgenMelodyForConditionalGeneration(PreTrainedModel): + config_class = MusicgenMelodyConfig + main_input_name = "input_ids" + supports_gradient_checkpointing = True + + def __init__( + self, + config: MusicgenMelodyConfig = None, + text_encoder: Optional[PreTrainedModel] = None, + audio_encoder: Optional[PreTrainedModel] = None, + decoder: Optional[MusicgenMelodyForCausalLM] = None, + ): + if config is None and None in (text_encoder, audio_encoder, decoder): + raise ValueError( + "Either a configuration has to be provided, or all three of text encoder, audio encoder and Musicgen Melody decoder." + ) + if config is None: + config = MusicgenMelodyConfig.from_sub_models_config( + text_encoder.config, audio_encoder.config, decoder.config + ) + else: + if not isinstance(config, self.config_class): + raise ValueError(f"Config: {config} has to be of type {self.config_class}") + + # initialize with config + super().__init__(config) + + if text_encoder is None: + text_encoder = AutoModelForTextEncoding.from_config(config.text_encoder) + + if audio_encoder is None: + audio_encoder = AutoModel.from_config(config.audio_encoder) + + if decoder is None: + decoder = MusicgenMelodyForCausalLM(config.decoder) + + self.text_encoder = text_encoder + self.audio_encoder = audio_encoder + self.decoder = decoder + + # make sure that the individual model's config refers to the shared config + # so that the updates to the config will be synced + self.text_encoder.config = self.config.text_encoder + self.audio_encoder.config = self.config.audio_encoder + self.decoder.config = self.config.decoder + + # text encoder outputs might need to be projected to different dimension for decoder + if self.text_encoder.config.hidden_size != self.decoder.config.hidden_size: + self.enc_to_dec_proj = nn.Linear(self.text_encoder.config.hidden_size, self.decoder.config.hidden_size) + + # audio encoder outputs after chroma extraction might need to be projected to different dimension for decoder + if self.config.num_chroma != self.decoder.config.hidden_size: + self.audio_enc_to_dec_proj = nn.Linear(self.config.num_chroma, self.decoder.config.hidden_size) + + if self.text_encoder.get_output_embeddings() is not None: + raise ValueError( + f"The encoder {self.text_encoder} should not have a LM Head. Please use a model without and LM Head" + ) + + # Initialize projection layers weights and tie text encoder and decoder weights if set accordingly + self.post_init() + + def _init_weights(self, module): + # MusicgenMelodyForConditionalGeneration is made of PreTrainedModels that have already been initialized + # Projection layers still need to be initialized. 
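+        # The projection layers in question are the optional `enc_to_dec_proj` and `audio_enc_to_dec_proj`
+        # linear layers created in `__init__` above, which are not covered by the sub-models' own initialization.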
+ std = self.decoder.config.initializer_factor + if isinstance(module, nn.Linear): + module.weight.data.normal_(mean=0.0, std=std) + if module.bias is not None: + module.bias.data.zero_() + + def tie_weights(self): + # tie text encoder & decoder if needed + if self.config.tie_encoder_decoder: + # tie text encoder and decoder base model + decoder_base_model_prefix = self.decoder.base_model_prefix + self._tie_encoder_decoder_weights( + self.text_encoder, self.decoder._modules[decoder_base_model_prefix], self.decoder.base_model_prefix + ) + + def get_text_encoder(self): + return self.text_encoder + + def get_encoder(self): + # get the text encoder to compute the conditionning hidden-states for generation + return self.get_text_encoder() + + def get_decoder(self): + return self.decoder + + def get_input_embeddings(self): + return self.text_encoder.get_input_embeddings() + + def get_output_embeddings(self): + return self.decoder.get_output_embeddings() + + def set_output_embeddings(self, new_embeddings): + return self.decoder.set_output_embeddings(new_embeddings) + + @classmethod + # Copied from transformers.models.musicgen.modeling_musicgen.MusicgenForConditionalGeneration.from_sub_models_pretrained with Musicgen->MusicgenMelody, musicgen-small->musicgen-melody + def from_sub_models_pretrained( + cls, + text_encoder_pretrained_model_name_or_path: str = None, + audio_encoder_pretrained_model_name_or_path: str = None, + decoder_pretrained_model_name_or_path: str = None, + *model_args, + **kwargs, + ) -> PreTrainedModel: + r""" + Instantiate a text encoder, an audio encoder, and a MusicGen decoder from one, two or three base classes of the + library from pretrained model checkpoints. + + + The model is set in evaluation mode by default using `model.eval()` (Dropout modules are deactivated). To train + the model, you need to first set it back in training mode with `model.train()`. + + Params: + text_encoder_pretrained_model_name_or_path (`str`, *optional*): + Information necessary to initiate the text encoder. Can be either: + + - A string, the *model id* of a pretrained model hosted inside a model repo on huggingface.co. + - A path to a *directory* containing model weights saved using + [`~PreTrainedModel.save_pretrained`], e.g., `./my_model_directory/`. + + audio_encoder_pretrained_model_name_or_path (`str`, *optional*): + Information necessary to initiate the audio encoder. Can be either: + + - A string, the *model id* of a pretrained model hosted inside a model repo on huggingface.co. + - A path to a *directory* containing model weights saved using + [`~PreTrainedModel.save_pretrained`], e.g., `./my_model_directory/`. + + decoder_pretrained_model_name_or_path (`str`, *optional*, defaults to `None`): + Information necessary to initiate the decoder. Can be either: + + - A string, the *model id* of a pretrained model hosted inside a model repo on huggingface.co. + - A path to a *directory* containing model weights saved using + [`~PreTrainedModel.save_pretrained`], e.g., `./my_model_directory/`. + + model_args (remaining positional arguments, *optional*): + All remaining positional arguments will be passed to the underlying model's `__init__` method. + + kwargs (remaining dictionary of keyword arguments, *optional*): + Can be used to update the configuration object (after it being loaded) and initiate the model (e.g., + `output_attentions=True`). + + - To update the text encoder configuration, use the prefix *text_encoder_* for each configuration + parameter. 
+ - To update the audio encoder configuration, use the prefix *audio_encoder_* for each configuration + parameter. + - To update the decoder configuration, use the prefix *decoder_* for each configuration parameter. + - To update the parent model configuration, do not use a prefix for each configuration parameter. + + Behaves differently depending on whether a `config` is provided or automatically loaded. + + Example: + + ```python + >>> from transformers import MusicgenMelodyForConditionalGeneration + + >>> # initialize a musicgen model from a t5 text encoder, encodec audio encoder, and musicgen decoder + >>> model = MusicgenMelodyForConditionalGeneration.from_sub_models_pretrained( + ... text_encoder_pretrained_model_name_or_path="google-t5/t5-base", + ... audio_encoder_pretrained_model_name_or_path="facebook/encodec_24khz", + ... decoder_pretrained_model_name_or_path="facebook/musicgen-melody", + ... ) + >>> # saving model after fine-tuning + >>> model.save_pretrained("./musicgen-ft") + >>> # load fine-tuned model + >>> model = MusicgenMelodyForConditionalGeneration.from_pretrained("./musicgen-ft") + ```""" + + kwargs_text_encoder = { + argument[len("text_encoder_") :]: value + for argument, value in kwargs.items() + if argument.startswith("text_encoder_") + } + + kwargs_audio_encoder = { + argument[len("audio_encoder_") :]: value + for argument, value in kwargs.items() + if argument.startswith("audio_encoder_") + } + + kwargs_decoder = { + argument[len("decoder_") :]: value for argument, value in kwargs.items() if argument.startswith("decoder_") + } + + # remove text encoder, audio encoder and decoder kwargs from kwargs + for key in kwargs_text_encoder.keys(): + del kwargs["text_encoder_" + key] + for key in kwargs_audio_encoder.keys(): + del kwargs["audio_encoder_" + key] + for key in kwargs_decoder.keys(): + del kwargs["decoder_" + key] + + # Load and initialize the encoder and decoder + # The distinction between encoder and decoder at the model level is made + # by the value of the flag `is_decoder` that we need to set correctly. + text_encoder = kwargs_text_encoder.pop("model", None) + if text_encoder is None: + if text_encoder_pretrained_model_name_or_path is None: + raise ValueError( + "If `text_encoder_model` is not defined as an argument, a `text_encoder_pretrained_model_name_or_path` has " + "to be defined." + ) + + if "config" not in kwargs_text_encoder: + encoder_config, kwargs_text_encoder = AutoConfig.from_pretrained( + text_encoder_pretrained_model_name_or_path, **kwargs_text_encoder, return_unused_kwargs=True + ) + + if encoder_config.is_decoder is True or encoder_config.add_cross_attention is True: + logger.info( + f"Initializing {text_encoder_pretrained_model_name_or_path} as a text_encoder model " + "from a decoder model. Cross-attention and casual mask are disabled." + ) + encoder_config.is_decoder = False + encoder_config.add_cross_attention = False + + kwargs_text_encoder["config"] = encoder_config + + text_encoder = AutoModel.from_pretrained( + text_encoder_pretrained_model_name_or_path, *model_args, **kwargs_text_encoder + ) + + audio_encoder = kwargs_audio_encoder.pop("model", None) + if audio_encoder is None: + if audio_encoder_pretrained_model_name_or_path is None: + raise ValueError( + "If `audio_encoder_model` is not defined as an argument, an `audio_encoder_pretrained_model_name_or_path` has " + "to be defined." 
+ ) + + if "config" not in kwargs_audio_encoder: + encoder_config, kwargs_audio_encoder = AutoConfig.from_pretrained( + audio_encoder_pretrained_model_name_or_path, **kwargs_audio_encoder, return_unused_kwargs=True + ) + + if encoder_config.is_decoder is True or encoder_config.add_cross_attention is True: + logger.info( + f"Initializing {audio_encoder_pretrained_model_name_or_path} as an audio_encoder model " + "from a decoder model. Cross-attention and casual mask are disabled." + ) + encoder_config.is_decoder = False + encoder_config.add_cross_attention = False + + kwargs_audio_encoder["config"] = encoder_config + + audio_encoder = AutoModel.from_pretrained( + audio_encoder_pretrained_model_name_or_path, *model_args, **kwargs_audio_encoder + ) + + decoder = kwargs_decoder.pop("model", None) + if decoder is None: + if decoder_pretrained_model_name_or_path is None: + raise ValueError( + "If `decoder_model` is not defined as an argument, a `decoder_pretrained_model_name_or_path` has " + "to be defined." + ) + + if "config" not in kwargs_decoder: + decoder_config, kwargs_decoder = AutoConfig.from_pretrained( + decoder_pretrained_model_name_or_path, **kwargs_decoder, return_unused_kwargs=True + ) + + if isinstance(decoder_config, MusicgenMelodyConfig): + decoder_config = decoder_config.decoder + + if decoder_config.is_decoder is False or decoder_config.add_cross_attention is False: + logger.info( + f"Initializing {decoder_pretrained_model_name_or_path} as a decoder model. Cross attention" + f" layers are added to {decoder_pretrained_model_name_or_path} and randomly initialized if" + f" {decoder_pretrained_model_name_or_path}'s architecture allows for cross attention layers." + ) + decoder_config.is_decoder = True + decoder_config.add_cross_attention = True + + kwargs_decoder["config"] = decoder_config + + if kwargs_decoder["config"].is_decoder is False or kwargs_decoder["config"].add_cross_attention is False: + logger.warning( + f"Decoder model {decoder_pretrained_model_name_or_path} is not initialized as a decoder. 
" + f"In order to initialize {decoder_pretrained_model_name_or_path} as a decoder, " + "make sure that the attributes `is_decoder` and `add_cross_attention` of `decoder_config` " + "passed to `.from_sub_models_pretrained(...)` are set to `True` or do not pass a " + "`decoder_config` to `.from_sub_models_pretrained(...)`" + ) + + decoder = MusicgenMelodyForCausalLM.from_pretrained( + decoder_pretrained_model_name_or_path, **kwargs_decoder + ) + + # instantiate config with corresponding kwargs + config = MusicgenMelodyConfig.from_sub_models_config( + text_encoder.config, audio_encoder.config, decoder.config, **kwargs + ) + return cls(text_encoder=text_encoder, audio_encoder=audio_encoder, decoder=decoder, config=config) + + @add_start_docstrings_to_model_forward(MUSICGEN_MELODY_INPUTS_DOCSTRING) + @replace_return_docstrings(output_type=MusicgenMelodyOutputWithPast, config_class=_CONFIG_FOR_DOC) + def forward( + self, + input_ids: Optional[torch.LongTensor] = None, + attention_mask: Optional[torch.BoolTensor] = None, + input_features: Optional[torch.FloatTensor] = None, + decoder_input_ids: Optional[torch.LongTensor] = None, + decoder_attention_mask: Optional[torch.BoolTensor] = None, + past_key_values: Tuple[Tuple[torch.FloatTensor]] = None, + encoder_hidden_states: Optional[torch.FloatTensor] = None, + inputs_embeds: Optional[torch.FloatTensor] = None, + decoder_inputs_embeds: Optional[torch.FloatTensor] = None, + labels: Optional[torch.LongTensor] = None, + use_cache: Optional[bool] = None, + output_attentions: Optional[bool] = None, + output_hidden_states: Optional[bool] = None, + return_dict: Optional[bool] = None, + **kwargs, + ) -> Union[Tuple, MusicgenMelodyOutputWithPast]: + r""" + Returns: + + Examples: + ```python + >>> from transformers import AutoProcessor, MusicgenMelodyForConditionalGeneration + >>> import torch + + >>> processor = AutoProcessor.from_pretrained("facebook/musicgen-melody") + >>> model = MusicgenMelodyForConditionalGeneration.from_pretrained("facebook/musicgen-melody") + + >>> inputs = processor( + ... text=["80s pop track with bassy drums and synth", "90s rock song with loud guitars and heavy drums"], + ... padding=True, + ... return_tensors="pt", + ... ) + + >>> pad_token_id = model.generation_config.pad_token_id + >>> decoder_input_ids = ( + ... torch.ones((inputs.input_ids.shape[0] * model.decoder.num_codebooks, 1), dtype=torch.long) + ... * pad_token_id + ... 
)

+        >>> logits = model(**inputs, decoder_input_ids=decoder_input_ids).logits
+        >>> logits.shape  # (bsz * num_codebooks, encoder_len + tgt_len, vocab_size)
+        torch.Size([8, 249, 2048])
+        ```"""
+        return_dict = return_dict if return_dict is not None else self.config.use_return_dict
+
+        kwargs_text_encoder = {
+            argument[len("text_encoder_") :]: value
+            for argument, value in kwargs.items()
+            if argument.startswith("text_encoder_")
+        }
+
+        kwargs_decoder = {
+            argument[len("decoder_") :]: value for argument, value in kwargs.items() if argument.startswith("decoder_")
+        }
+
+        if encoder_hidden_states is None:
+            if inputs_embeds is not None or input_ids is not None:
+                encoder_outputs = self.text_encoder(
+                    input_ids=input_ids,
+                    attention_mask=attention_mask,
+                    inputs_embeds=inputs_embeds,
+                    output_attentions=output_attentions,
+                    output_hidden_states=output_hidden_states,
+                    return_dict=return_dict,
+                    **kwargs_text_encoder,
+                )
+
+                encoder_hidden_states = encoder_outputs[0]
+
+                # optionally project encoder_hidden_states
+                if self.text_encoder.config.hidden_size != self.decoder.config.hidden_size:
+                    encoder_hidden_states = self.enc_to_dec_proj(encoder_hidden_states)
+
+            if attention_mask is not None and encoder_hidden_states is not None:
+                encoder_hidden_states = encoder_hidden_states * attention_mask[..., None]
+
+            # set default audio conditional hidden states if text is provided but audio is not
+            if encoder_hidden_states is not None and input_features is None:
+                input_features = torch.zeros(
+                    (encoder_hidden_states.shape[0], 1, self.config.num_chroma),
+                    device=self.device,
+                    dtype=self.dtype,
+                )
+                input_features[:, :, 0] = 1
+
+            if input_features is not None:
+                audio_hidden_states = input_features
+
+                # optionally project audio_hidden_states ->
+                # (batch_size, seq_len, num_chroma) -> (batch_size, seq_len, hidden_size)
+                if self.config.num_chroma != self.decoder.config.hidden_size:
+                    audio_hidden_states = self.audio_enc_to_dec_proj(audio_hidden_states)
+
+                # pad or truncate to config.chroma_length
+                if audio_hidden_states.shape[1] < self.config.chroma_length:
+                    n_repeat = int(math.ceil(self.config.chroma_length / audio_hidden_states.shape[1]))
+                    audio_hidden_states = audio_hidden_states.repeat(1, n_repeat, 1)
+                else:
+                    logger.warning(
+                        f"The conditional audio signal is of length {audio_hidden_states.shape[1]}, which exceeds "
+                        f"the maximum chroma duration of {self.config.chroma_length}. "
+                        f"The audio will be truncated to {self.config.chroma_length} frames."
+ ) + audio_hidden_states = audio_hidden_states[:, : self.config.chroma_length] + + if encoder_hidden_states is not None: + encoder_hidden_states = torch.cat([audio_hidden_states, encoder_hidden_states], dim=1) + else: + encoder_hidden_states = audio_hidden_states + + if (labels is not None) and (decoder_input_ids is None and decoder_inputs_embeds is None): + decoder_input_ids = shift_tokens_right( + labels, self.config.pad_token_id, self.config.decoder_start_token_id + ) + + # Decode + decoder_outputs = self.decoder( + input_ids=decoder_input_ids, + attention_mask=decoder_attention_mask, + encoder_hidden_states=encoder_hidden_states, + inputs_embeds=decoder_inputs_embeds, + output_attentions=output_attentions, + output_hidden_states=output_hidden_states, + use_cache=use_cache, + past_key_values=past_key_values, + return_dict=return_dict, + **kwargs_decoder, + ) + + loss = None + if labels is not None: + logits = decoder_outputs.logits if return_dict else decoder_outputs[0] + loss_fct = CrossEntropyLoss() + loss = loss_fct(logits.view(-1, self.config.vocab_size), labels.view(-1)) + + if not return_dict: + if loss is not None: + return (loss,) + decoder_outputs + (encoder_hidden_states,) + else: + return decoder_outputs + (encoder_hidden_states,) + + return MusicgenMelodyOutputWithPast( + loss=loss, + logits=decoder_outputs.logits, + past_key_values=decoder_outputs.past_key_values, + hidden_states=decoder_outputs.hidden_states, + attentions=decoder_outputs.attentions, + encoder_hidden_states=encoder_hidden_states, + ) + + def prepare_inputs_for_generation( + self, + decoder_input_ids, + encoder_hidden_states=None, + past_key_values=None, + attention_mask=None, + decoder_attention_mask=None, + decoder_head_mask=None, + use_cache=None, + decoder_delay_pattern_mask=None, + guidance_scale=None, + **kwargs, + ): + if decoder_delay_pattern_mask is None: + decoder_input_ids, decoder_delay_pattern_mask = self.decoder.build_delay_pattern_mask( + decoder_input_ids, + self.generation_config.pad_token_id, + max_length=self.generation_config.max_length, + ) + + # apply the delay pattern mask + decoder_input_ids = self.decoder.apply_delay_pattern_mask(decoder_input_ids, decoder_delay_pattern_mask) + + if guidance_scale is not None and guidance_scale > 1: + # for classifier free guidance we need to replicate the decoder args across the batch dim (we'll split these + # before sampling) + decoder_input_ids = decoder_input_ids.repeat((2, 1)) + if decoder_attention_mask is not None: + decoder_attention_mask = decoder_attention_mask.repeat((2, 1)) + + if past_key_values is not None: + past_length = past_key_values[0][0].shape[2] + + # Some generation methods already pass only the last input ID + if decoder_input_ids.shape[1] > past_length: + remove_prefix_length = past_length + else: + # Default to old behavior: keep only final ID + remove_prefix_length = decoder_input_ids.shape[1] - 1 + + decoder_input_ids = decoder_input_ids[:, remove_prefix_length:] + + # we only want to use conditional signal in the 1st generation step but keeping the attention mask + encoder_hidden_states = None + # we also have to update the attention mask + + return { + "input_ids": None, # encoder_hidden_states is defined. 
input_ids not needed + "encoder_hidden_states": encoder_hidden_states, + "past_key_values": past_key_values, + "decoder_input_ids": decoder_input_ids, + "attention_mask": attention_mask, + "decoder_attention_mask": decoder_attention_mask, + "decoder_head_mask": decoder_head_mask, + "use_cache": use_cache, + } + + # Copied from transformers.models.musicgen.modeling_musicgen.MusicgenForConditionalGeneration._prepare_decoder_input_ids_for_generation + def _prepare_decoder_input_ids_for_generation( + self, + batch_size: int, + model_input_name: str, + model_kwargs: Dict[str, torch.Tensor], + decoder_start_token_id: int = None, + bos_token_id: int = None, + device: torch.device = None, + ) -> Tuple[torch.LongTensor, Dict[str, torch.Tensor]]: + """Prepares `decoder_input_ids` for generation with encoder-decoder models""" + + # 1. Check whether the user has defined `decoder_input_ids` manually. To facilitate in terms of input naming, + # we also allow the user to pass it under `input_ids`, if the encoder does not use it as the main input. + if model_kwargs is not None and "decoder_input_ids" in model_kwargs: + decoder_input_ids = model_kwargs.pop("decoder_input_ids") + elif "input_ids" in model_kwargs and model_input_name != "input_ids": + decoder_input_ids = model_kwargs.pop("input_ids") + else: + decoder_input_ids = None + + # 2. Encoder-decoder models expect the `decoder_input_ids` to start with a special token. Let's ensure that. + decoder_start_token_id = self._get_decoder_start_token_id(decoder_start_token_id, bos_token_id) + if device is None: + device = self.device + decoder_input_ids_start = ( + torch.ones((batch_size * self.decoder.num_codebooks, 1), dtype=torch.long, device=device) + * decoder_start_token_id + ) + + # no user input -> use decoder_start_token_id as decoder_input_ids + if decoder_input_ids is None: + decoder_input_ids = decoder_input_ids_start + + # user input but doesn't start with decoder_start_token_id -> prepend decoder_start_token_id (and adjust + # decoder_attention_mask if provided) + elif (decoder_input_ids[..., 0] != decoder_start_token_id).all().item(): + decoder_input_ids = torch.cat([decoder_input_ids_start, decoder_input_ids], dim=-1) + if "decoder_attention_mask" in model_kwargs: + decoder_attention_mask = model_kwargs["decoder_attention_mask"] + decoder_attention_mask = torch.cat( + (torch.ones_like(decoder_attention_mask)[:, :1], decoder_attention_mask), + dim=-1, + ) + model_kwargs["decoder_attention_mask"] = decoder_attention_mask + + return decoder_input_ids, model_kwargs + + def _prepare_encoder_hidden_states_kwargs_for_generation( + self, + inputs_tensor: torch.Tensor, + model_kwargs, + model_input_name: Optional[str] = None, + guidance_scale: Optional[float] = None, + ) -> Dict[str, Any]: + encoder_hidden_states = None + # attention mask is consumed once to produce text conditional hidden states through the text encoder + encoder_attention_mask = model_kwargs.pop("attention_mask") + + # 1. condition on text + if inputs_tensor is not None: + encoder = self.get_text_encoder() + # Compatibility with Accelerate big model inference: we need the encoder to outputs stuff on the same device + # as the inputs. + if hasattr(encoder, "_hf_hook"): + encoder._hf_hook.io_same_device = True + + # Prepare args and kwargs from model kwargs. 
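+            # Illustration (hypothetical kwargs, not from the original code): if `model_kwargs` were
+            # {"decoder_attention_mask": ..., "use_cache": True, "output_attentions": True}, only
+            # "output_attentions" would reach the text encoder below; "decoder_"-prefixed and cache
+            # arguments are filtered out here, and anything the encoder's signature does not accept
+            # is dropped right after.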
+ irrelevant_prefix = ["decoder_", "use_cache"] + encoder_kwargs = { + argument: value + for argument, value in model_kwargs.items() + if not any(argument.startswith(p) for p in irrelevant_prefix) + } + encoder_signature = set(inspect.signature(encoder.forward).parameters) + encoder_accepts_wildcard = "kwargs" in encoder_signature or "model_kwargs" in encoder_signature + if not encoder_accepts_wildcard: + encoder_kwargs = { + argument: value for argument, value in encoder_kwargs.items() if argument in encoder_signature + } + + # make sure that encoder returns `ModelOutput` + model_input_name = model_input_name if model_input_name is not None else self.text_encoder.main_input_name + encoder_kwargs["return_dict"] = True + encoder_kwargs[model_input_name] = inputs_tensor + if encoder_attention_mask is not None: + encoder_kwargs["attention_mask"] = encoder_attention_mask + encoder_hidden_states = encoder(**encoder_kwargs).last_hidden_state + + # optionally project encoder_hidden_states + if self.text_encoder.config.hidden_size != self.decoder.config.hidden_size: + encoder_hidden_states = self.enc_to_dec_proj(encoder_hidden_states) + + # for classifier free guidance we need to add a 'null' input to our encoder hidden states + if guidance_scale is not None and guidance_scale > 1: + encoder_hidden_states = torch.concatenate( + [encoder_hidden_states, torch.zeros_like(encoder_hidden_states)], dim=0 + ) + if encoder_attention_mask is not None: + encoder_attention_mask = torch.concatenate( + [encoder_attention_mask, torch.zeros_like(encoder_attention_mask)], dim=0 + ) + if encoder_attention_mask is not None: + encoder_hidden_states = encoder_hidden_states * encoder_attention_mask[..., None] + + # 2. condition on audio + audio_hidden_states = model_kwargs.get("input_features", None) + + if inputs_tensor is not None: + if audio_hidden_states is not None: + null_audio_hidden_states = torch.zeros_like(audio_hidden_states) + else: + null_audio_hidden_states = torch.zeros( + (inputs_tensor.shape[0], 1, self.config.num_chroma), device=self.device, dtype=self.dtype + ) + null_audio_hidden_states[:, :, 0] = 1 + + if audio_hidden_states is None: + audio_hidden_states = null_audio_hidden_states + + if audio_hidden_states is not None: + # for classifier free guidance we need to add a 'null' input to our audio hidden states + if guidance_scale is not None and guidance_scale > 1: + audio_hidden_states = torch.concatenate([audio_hidden_states, null_audio_hidden_states], dim=0) + + # optionally project audio_hidden_states -> + # (batch_size, seq_len, num_chroma) -> (batch_size, seq_len, hidden_size) + if self.config.num_chroma != self.decoder.config.hidden_size: + audio_hidden_states = self.audio_enc_to_dec_proj(audio_hidden_states) + + # pad or truncate to config.chroma_length + if audio_hidden_states.shape[1] < self.config.chroma_length: + n_repeat = int(math.ceil(self.config.chroma_length / audio_hidden_states.shape[1])) + audio_hidden_states = audio_hidden_states.repeat(1, n_repeat, 1) + audio_hidden_states = audio_hidden_states[:, : self.config.chroma_length] + + if encoder_hidden_states is not None: + encoder_hidden_states = torch.cat([audio_hidden_states, encoder_hidden_states], dim=1) + else: + encoder_hidden_states = audio_hidden_states + + model_kwargs["encoder_hidden_states"] = encoder_hidden_states + + return model_kwargs + + def prepare_decoder_input_ids_from_labels(self, labels: torch.Tensor): + return shift_tokens_right(labels, self.config.pad_token_id, self.config.decoder_start_token_id) + + def 
resize_token_embeddings(self, *args, **kwargs): + raise NotImplementedError( + "Resizing the embedding layers via the EncoderDecoderModel directly is not supported. Please use the" + " respective methods of the wrapped objects (model.encoder.resize_token_embeddings(...) or" + " model.decoder.resize_token_embeddings(...))" + ) + + def _maybe_initialize_input_ids_for_generation( + self, + inputs: Optional[torch.Tensor] = None, + bos_token_id: Optional[int] = None, + model_kwargs: Optional[Dict[str, torch.Tensor]] = None, + ) -> torch.LongTensor: + """Initializes input ids for generation, if necessary.""" + if inputs is not None: + return inputs + + if bos_token_id is None: + raise ValueError("`bos_token_id` has to be defined when no `input_ids` are provided.") + + # If there is some tensor in `model_kwargs`, we can infer the batch size from it. This is helpful with + # soft-prompting or in multimodal implementations built on top of decoder-only language models. + batch_size = 1 + for value in model_kwargs.values(): + if isinstance(value, torch.Tensor): + batch_size = value.shape[0] + break + return torch.ones((batch_size, 1), dtype=torch.long, device=self.device) * bos_token_id + + @torch.no_grad() + def generate( + self, + inputs: Optional[torch.Tensor] = None, + generation_config: Optional[GenerationConfig] = None, + logits_processor: Optional[LogitsProcessorList] = None, + stopping_criteria: Optional[StoppingCriteriaList] = None, + synced_gpus: Optional[bool] = None, + streamer: Optional["BaseStreamer"] = None, + **kwargs, + ): + """ + + Generates sequences of token ids for models with a language modeling head. + + + + Most generation-controlling parameters are set in `generation_config` which, if not passed, will be set to the + model's default generation configuration. You can override any `generation_config` by passing the corresponding + parameters to generate(), e.g. `.generate(inputs, num_beams=4, do_sample=True)`. + + For an overview of generation strategies and code examples, check out the [following + guide](./generation_strategies). + + + + Parameters: + inputs (`torch.Tensor` of varying shape depending on the modality, *optional*): + The sequence used as a prompt for the generation or as model inputs to the encoder. If `None` the + method initializes it with `bos_token_id` and a batch size of 1. For decoder-only models `inputs` + should be in the format `input_ids`. For encoder-decoder models *inputs* can represent any of + `input_ids`, `input_values`, `input_features`, or `pixel_values`. + generation_config (`~generation.GenerationConfig`, *optional*): + The generation configuration to be used as base parametrization for the generation call. `**kwargs` + passed to generate matching the attributes of `generation_config` will override them. If + `generation_config` is not provided, the default will be used, which had the following loading + priority: 1) from the `generation_config.json` model file, if it exists; 2) from the model + configuration. Please note that unspecified parameters will inherit [`~generation.GenerationConfig`]'s + default values, whose documentation should be checked to parameterize generation. + logits_processor (`LogitsProcessorList`, *optional*): + Custom logits processors that complement the default logits processors built from arguments and + generation config. If a logit processor is passed that is already created with the arguments or a + generation config an error is thrown. This feature is intended for advanced users. 
+ stopping_criteria (`StoppingCriteriaList`, *optional*): + Custom stopping criteria that complement the default stopping criteria built from arguments and a + generation config. If a stopping criteria is passed that is already created with the arguments or a + generation config an error is thrown. This feature is intended for advanced users. + synced_gpus (`bool`, *optional*): + Whether to continue running the while loop until max_length (needed for ZeRO stage 3) + streamer (`BaseStreamer`, *optional*): + Streamer object that will be used to stream the generated sequences. Generated tokens are passed + through `streamer.put(token_ids)` and the streamer is responsible for any further processing. + kwargs (`Dict[str, Any]`, *optional*): + Ad hoc parametrization of `generate_config` and/or additional model-specific kwargs that will be + forwarded to the `forward` function of the model. If the model is an encoder-decoder model, encoder + specific kwargs should not be prefixed and decoder specific kwargs should be prefixed with *decoder_*. + + Return: + [`~utils.ModelOutput`] or `torch.LongTensor`: A [`~utils.ModelOutput`] (if `return_dict_in_generate=True` + or when `config.return_dict_in_generate=True`) or a `torch.FloatTensor`. + + If the model is *not* an encoder-decoder model (`model.config.is_encoder_decoder=False`), the possible + [`~utils.ModelOutput`] types are: + + - [`~generation.GreedySearchDecoderOnlyOutput`], + - [`~generation.SampleDecoderOnlyOutput`], + - [`~generation.BeamSearchDecoderOnlyOutput`], + - [`~generation.BeamSampleDecoderOnlyOutput`] + + If the model is an encoder-decoder model (`model.config.is_encoder_decoder=True`), the possible + [`~utils.ModelOutput`] types are: + + - [`~generation.GreedySearchEncoderDecoderOutput`], + - [`~generation.SampleEncoderDecoderOutput`], + - [`~generation.BeamSearchEncoderDecoderOutput`], + - [`~generation.BeamSampleEncoderDecoderOutput`] + """ + # 1. Handle `generation_config` and kwargs that might update it, and validate the resulting objects + if generation_config is None: + generation_config = self.generation_config + + generation_config = copy.deepcopy(generation_config) + model_kwargs = generation_config.update(**kwargs) # All unused kwargs must be model kwargs + generation_config.validate() + self._validate_model_kwargs(model_kwargs.copy()) + + # 2. Set generation parameters if not already defined + logits_processor = logits_processor if logits_processor is not None else LogitsProcessorList() + stopping_criteria = stopping_criteria if stopping_criteria is not None else StoppingCriteriaList() + + if generation_config.pad_token_id is None and generation_config.eos_token_id is not None: + if model_kwargs.get("attention_mask", None) is None: + logger.warning( + "The attention mask and the pad token id were not set. As a consequence, you may observe " + "unexpected behavior. Please pass your input's `attention_mask` to obtain reliable results." + ) + eos_token_id = generation_config.eos_token_id + if isinstance(eos_token_id, list): + eos_token_id = eos_token_id[0] + logger.warning(f"Setting `pad_token_id` to `eos_token_id`:{eos_token_id} for open-end generation.") + generation_config.pad_token_id = eos_token_id + + # 3. 
Define model inputs + # inputs_tensor has to be defined + # model_input_name is defined if model-specific keyword input is passed + # otherwise model_input_name is None + # all model-specific keyword inputs are removed from `model_kwargs` + inputs_tensor, model_input_name, model_kwargs = self._prepare_model_inputs( + inputs, generation_config.bos_token_id, model_kwargs + ) + batch_size = inputs_tensor.shape[0] + + # 4. Define other model kwargs + model_kwargs["output_attentions"] = generation_config.output_attentions + model_kwargs["output_hidden_states"] = generation_config.output_hidden_states + model_kwargs["use_cache"] = generation_config.use_cache + model_kwargs["guidance_scale"] = generation_config.guidance_scale + + if model_kwargs.get("attention_mask", None) is None: + model_kwargs["attention_mask"] = self._prepare_attention_mask_for_generation( + inputs_tensor, generation_config.pad_token_id, generation_config.eos_token_id + ) + + if "encoder_hidden_states" not in model_kwargs: + # encoder_hidden_states are created and added to `model_kwargs` + model_kwargs = self._prepare_encoder_hidden_states_kwargs_for_generation( + inputs_tensor, + model_kwargs, + model_input_name, + guidance_scale=generation_config.guidance_scale, + ) + + # 5. Prepare `input_ids` which will be used for auto-regressive generation + input_ids, model_kwargs = self._prepare_decoder_input_ids_for_generation( + batch_size=batch_size, + model_input_name=model_input_name, + model_kwargs=model_kwargs, + decoder_start_token_id=generation_config.decoder_start_token_id, + bos_token_id=generation_config.bos_token_id, + device=inputs_tensor.device, + ) + + # 6. Prepare `max_length` depending on other stopping criteria. + input_ids_seq_length = input_ids.shape[-1] + + has_default_max_length = kwargs.get("max_length") is None and generation_config.max_length is not None + if has_default_max_length and generation_config.max_new_tokens is None: + logger.warning( + f"Using the model-agnostic default `max_length` (={generation_config.max_length}) " + "to control the generation length. We recommend setting `max_new_tokens` to control the maximum length of the generation." + ) + elif generation_config.max_new_tokens is not None: + if not has_default_max_length: + logger.warning( + f"Both `max_new_tokens` (={generation_config.max_new_tokens}) and `max_length`(=" + f"{generation_config.max_length}) seem to have been set. `max_new_tokens` will take precedence. " + "Please refer to the documentation for more information. " + "(https://huggingface.co/docs/transformers/main/en/main_classes/text_generation)" + ) + generation_config.max_length = generation_config.max_new_tokens + input_ids_seq_length + + if generation_config.min_length is not None and generation_config.min_length > generation_config.max_length: + raise ValueError( + f"Unfeasible length constraints: the minimum length ({generation_config.min_length}) is larger than" + f" the maximum length ({generation_config.max_length})" + ) + if input_ids_seq_length >= generation_config.max_length: + logger.warning( + f"Input length of decoder_input_ids is {input_ids_seq_length}, but `max_length` is set to" + f" {generation_config.max_length}. This can lead to unexpected behavior. You should consider" + " increasing `max_new_tokens`." 
+ ) + + # build the delay pattern mask for offsetting each codebook prediction by 1 (this behaviour is specific to Musicgen Melody) + input_ids, decoder_delay_pattern_mask = self.decoder.build_delay_pattern_mask( + input_ids, + pad_token_id=generation_config.decoder_start_token_id, + max_length=generation_config.max_length, + ) + # stash the delay mask so that we don't have to recompute in each forward pass + model_kwargs["decoder_delay_pattern_mask"] = decoder_delay_pattern_mask + + # input_ids are ready to be placed on the streamer (if used) + if streamer is not None: + streamer.put(input_ids.cpu()) + + # 7. determine generation mode + is_greedy_gen_mode = ( + (generation_config.num_beams == 1) + and (generation_config.num_beam_groups == 1) + and generation_config.do_sample is False + ) + is_sample_gen_mode = ( + (generation_config.num_beams == 1) + and (generation_config.num_beam_groups == 1) + and generation_config.do_sample is True + ) + + # 8. prepare batched CFG externally (to enable coexistance with the unbatched CFG) + if generation_config.guidance_scale is not None and generation_config.guidance_scale > 1: + logits_processor.append(ClassifierFreeGuidanceLogitsProcessor(generation_config.guidance_scale)) + generation_config.guidance_scale = None + + # 9. prepare distribution pre_processing samplers + logits_processor = self._get_logits_processor( + generation_config=generation_config, + input_ids_seq_length=input_ids_seq_length, + encoder_input_ids=inputs_tensor, + prefix_allowed_tokens_fn=None, + logits_processor=logits_processor, + ) + + # 10. prepare stopping criteria + stopping_criteria = self._get_stopping_criteria( + generation_config=generation_config, stopping_criteria=stopping_criteria + ) + + if is_greedy_gen_mode: + if generation_config.num_return_sequences > 1: + raise ValueError( + "num_return_sequences has to be 1 when doing greedy search, " + f"but is {generation_config.num_return_sequences}." + ) + + # 11. run greedy search + outputs = self.greedy_search( + input_ids, + logits_processor=logits_processor, + stopping_criteria=stopping_criteria, + pad_token_id=generation_config.pad_token_id, + eos_token_id=generation_config.eos_token_id, + output_scores=generation_config.output_scores, + return_dict_in_generate=generation_config.return_dict_in_generate, + synced_gpus=synced_gpus, + streamer=streamer, + **model_kwargs, + ) + + elif is_sample_gen_mode: + # 11. prepare logits warper + logits_warper = self._get_logits_warper(generation_config) + + # expand input_ids with `num_return_sequences` additional sequences per batch + input_ids, model_kwargs = self._expand_inputs_for_generation( + input_ids=input_ids, + expand_size=generation_config.num_return_sequences, + is_encoder_decoder=self.config.is_encoder_decoder, + **model_kwargs, + ) + + # 12. run sample + outputs = self.sample( + input_ids, + logits_processor=logits_processor, + logits_warper=logits_warper, + stopping_criteria=stopping_criteria, + pad_token_id=generation_config.pad_token_id, + eos_token_id=generation_config.eos_token_id, + output_scores=generation_config.output_scores, + return_dict_in_generate=generation_config.return_dict_in_generate, + synced_gpus=synced_gpus, + streamer=streamer, + **model_kwargs, + ) + + else: + raise ValueError( + "Got incompatible mode for generation, should be one of greedy or sampling. " + "Ensure that beam search is de-activated by setting `num_beams=1` and `num_beam_groups=1`." 
+ ) + + if generation_config.return_dict_in_generate: + output_ids = outputs.sequences + else: + output_ids = outputs + + # apply the pattern mask to the final ids + output_ids = self.decoder.apply_delay_pattern_mask(output_ids, model_kwargs["decoder_delay_pattern_mask"]) + + # revert the pattern delay mask by filtering the pad token id + output_ids = output_ids[output_ids != generation_config.pad_token_id].reshape( + batch_size, self.decoder.num_codebooks, -1 + ) + + # append the frame dimension back to the audio codes + output_ids = output_ids[None, ...] + + audio_scales = model_kwargs.get("audio_scales") + if audio_scales is None: + audio_scales = [None] * batch_size + + if self.decoder.config.audio_channels == 1: + output_values = self.audio_encoder.decode( + output_ids, + audio_scales=audio_scales, + ).audio_values + else: + codec_outputs_left = self.audio_encoder.decode(output_ids[:, :, ::2, :], audio_scales=audio_scales) + output_values_left = codec_outputs_left.audio_values + + codec_outputs_right = self.audio_encoder.decode(output_ids[:, :, 1::2, :], audio_scales=audio_scales) + output_values_right = codec_outputs_right.audio_values + + output_values = torch.cat([output_values_left, output_values_right], dim=1) + + if generation_config.return_dict_in_generate: + outputs.sequences = output_values + return outputs + else: + return output_values + + def _update_model_kwargs_for_generation( + self, + outputs: ModelOutput, + model_kwargs: Dict[str, Any], + is_encoder_decoder: bool = False, + standardize_cache_format: bool = False, + model_inputs: Optional[Dict[str, Any]] = None, + ) -> Dict[str, Any]: + # update past_key_values + model_kwargs["past_key_values"] = self._extract_past_from_model_output( + outputs, standardize_cache_format=standardize_cache_format + ) + if getattr(outputs, "state", None) is not None: + model_kwargs["state"] = outputs.state + + # update token_type_ids with last value + if "token_type_ids" in model_kwargs: + token_type_ids = model_kwargs["token_type_ids"] + model_kwargs["token_type_ids"] = torch.cat([token_type_ids, token_type_ids[:, -1].unsqueeze(-1)], dim=-1) + + # update decoder attention mask + if "decoder_attention_mask" in model_kwargs: + decoder_attention_mask = model_kwargs["decoder_attention_mask"] + model_kwargs["decoder_attention_mask"] = torch.cat( + [decoder_attention_mask, decoder_attention_mask.new_ones((decoder_attention_mask.shape[0], 1))], + dim=-1, + ) + + return model_kwargs diff --git a/src/transformers/models/musicgen_melody/processing_musicgen_melody.py b/src/transformers/models/musicgen_melody/processing_musicgen_melody.py new file mode 100644 index 00000000000000..a474be38b4cbcf --- /dev/null +++ b/src/transformers/models/musicgen_melody/processing_musicgen_melody.py @@ -0,0 +1,174 @@ +# coding=utf-8 +# Copyright 2024 Meta AI and The HuggingFace Inc. team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+""" +Text/audio processor class for MusicGen Melody +""" +from typing import List, Optional + +import numpy as np + +from ...processing_utils import ProcessorMixin +from ...utils import to_numpy + + +class MusicgenMelodyProcessor(ProcessorMixin): + r""" + Constructs a MusicGen Melody processor which wraps a Wav2Vec2 feature extractor - for raw audio waveform processing - and a T5 tokenizer into a single processor + class. + + [`MusicgenProcessor`] offers all the functionalities of [`MusicgenMelodyFeatureExtractor`] and [`T5Tokenizer`]. See + [`~MusicgenProcessor.__call__`] and [`~MusicgenProcessor.decode`] for more information. + + Args: + feature_extractor (`MusicgenMelodyFeatureExtractor`): + An instance of [`MusicgenMelodyFeatureExtractor`]. The feature extractor is a required input. + tokenizer (`T5Tokenizer`): + An instance of [`T5Tokenizer`]. The tokenizer is a required input. + """ + + feature_extractor_class = "MusicgenMelodyFeatureExtractor" + tokenizer_class = ("T5Tokenizer", "T5TokenizerFast") + + def __init__(self, feature_extractor, tokenizer): + super().__init__(feature_extractor, tokenizer) + + # Copied from transformers.models.musicgen.processing_musicgen.MusicgenProcessor.get_decoder_prompt_ids + def get_decoder_prompt_ids(self, task=None, language=None, no_timestamps=True): + return self.tokenizer.get_decoder_prompt_ids(task=task, language=language, no_timestamps=no_timestamps) + + def __call__(self, audio=None, text=None, **kwargs): + """ + Main method to prepare for the model one or several sequences(s) and audio(s). This method forwards the `audio` + and `kwargs` arguments to MusicgenMelodyFeatureExtractor's [`~MusicgenMelodyFeatureExtractor.__call__`] if `audio` is not + `None` to pre-process the audio. It also forwards the `text` and `kwargs` arguments to + PreTrainedTokenizer's [`~PreTrainedTokenizer.__call__`] if `text` is not `None`. Please refer to the doctsring of the above two methods for more information. + + Args: + audio (`np.ndarray`, `torch.Tensor`, `List[np.ndarray]`, `List[torch.Tensor]`): + The audio or batch of audios to be prepared. Each audio can be NumPy array or PyTorch tensor. In case + of a NumPy array/PyTorch tensor, each audio should be a mono-stereo signal of shape (T), where T is the sample length of the audio. + text (`str`, `List[str]`, `List[List[str]]`): + The sequence or batch of sequences to be encoded. Each sequence can be a string or a list of strings + (pretokenized string). If the sequences are provided as list of strings (pretokenized), you must set + `is_split_into_words=True` (to lift the ambiguity with a batch of sequences). + kwargs (*optional*): + Remaining dictionary of keyword arguments that will be passed to the feature extractor and/or the + tokenizer. + Returns: + [`BatchEncoding`]: A [`BatchEncoding`] with the following fields: + - **input_ids** -- List of token ids to be fed to a model. Returned when `text` is not `None`. + - **input_features** -- Audio input features to be fed to a model. Returned when `audio` is not `None`. + - **attention_mask** -- List of token indices specifying which tokens should be attended to by the model when `text` is not `None`. + When only `audio` is specified, returns the timestamps attention mask. 
+ """ + + sampling_rate = kwargs.pop("sampling_rate", None) + + if audio is None and text is None: + raise ValueError("You need to specify either an `audio` or `text` input to process.") + + if text is not None: + inputs = self.tokenizer(text, **kwargs) + if audio is not None: + audio_inputs = self.feature_extractor(audio, sampling_rate=sampling_rate, **kwargs) + + if text is None: + return audio_inputs + elif audio is None: + return inputs + else: + inputs["input_features"] = audio_inputs["input_features"] + return inputs + + # Copied from transformers.models.musicgen.processing_musicgen.MusicgenProcessor.batch_decode with padding_mask->attention_mask + def batch_decode(self, *args, **kwargs): + """ + This method is used to decode either batches of audio outputs from the MusicGen model, or batches of token ids + from the tokenizer. In the case of decoding token ids, this method forwards all its arguments to T5Tokenizer's + [`~PreTrainedTokenizer.batch_decode`]. Please refer to the docstring of this method for more information. + """ + audio_values = kwargs.pop("audio", None) + attention_mask = kwargs.pop("attention_mask", None) + + if len(args) > 0: + audio_values = args[0] + args = args[1:] + + if audio_values is not None: + return self._decode_audio(audio_values, attention_mask=attention_mask) + else: + return self.tokenizer.batch_decode(*args, **kwargs) + + # Copied from transformers.models.musicgen.processing_musicgen.MusicgenProcessor.decode + def decode(self, *args, **kwargs): + """ + This method forwards all its arguments to T5Tokenizer's [`~PreTrainedTokenizer.decode`]. Please refer to the + docstring of this method for more information. + """ + return self.tokenizer.decode(*args, **kwargs) + + # Copied from transformers.models.musicgen.processing_musicgen.MusicgenProcessor._decode_audio with padding_mask->attention_mask + def _decode_audio(self, audio_values, attention_mask: Optional = None) -> List[np.ndarray]: + """ + This method strips any padding from the audio values to return a list of numpy audio arrays. + """ + audio_values = to_numpy(audio_values) + bsz, channels, seq_len = audio_values.shape + + if attention_mask is None: + return list(audio_values) + + attention_mask = to_numpy(attention_mask) + + # match the sequence length of the padding mask to the generated audio arrays by padding with the **non-padding** + # token (so that the generated audio values are **not** treated as padded tokens) + difference = seq_len - attention_mask.shape[-1] + padding_value = 1 - self.feature_extractor.padding_value + attention_mask = np.pad(attention_mask, ((0, 0), (0, difference)), "constant", constant_values=padding_value) + + audio_values = audio_values.tolist() + for i in range(bsz): + sliced_audio = np.asarray(audio_values[i])[ + attention_mask[i][None, :] != self.feature_extractor.padding_value + ] + audio_values[i] = sliced_audio.reshape(channels, -1) + + return audio_values + + def get_unconditional_inputs(self, num_samples=1, return_tensors="pt"): + """ + Helper function to get null inputs for unconditional generation, enabling the model to be used without the + feature extractor or tokenizer. + + Args: + num_samples (int, *optional*): + Number of audio samples to unconditionally generate. 
+ + Example: + ```python + >>> from transformers import MusicgenMelodyForConditionalGeneration, MusicgenMelodyProcessor + + >>> model = MusicgenMelodyForConditionalGeneration.from_pretrained("facebook/musicgen-melody") + + >>> # get the unconditional (or 'null') inputs for the model + >>> processor = MusicgenMelodyProcessor.from_pretrained("facebook/musicgen-melody") + >>> unconditional_inputs = processor.get_unconditional_inputs(num_samples=1) + + >>> audio_samples = model.generate(**unconditional_inputs, max_new_tokens=256) + ```""" + inputs = self.tokenizer([""] * num_samples, return_tensors=return_tensors, return_attention_mask=True) + inputs["attention_mask"][:] = 0 + + return inputs diff --git a/src/transformers/utils/dummy_pt_objects.py b/src/transformers/utils/dummy_pt_objects.py index 44f9aec5e3fb08..50c3c7ea8b456c 100644 --- a/src/transformers/utils/dummy_pt_objects.py +++ b/src/transformers/utils/dummy_pt_objects.py @@ -5876,6 +5876,37 @@ def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) +MUSICGEN_MELODY_PRETRAINED_MODEL_ARCHIVE_LIST = None + + +class MusicgenMelodyForCausalLM(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +class MusicgenMelodyForConditionalGeneration(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +class MusicgenMelodyModel(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +class MusicgenMelodyPreTrainedModel(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + MVP_PRETRAINED_MODEL_ARCHIVE_LIST = None diff --git a/src/transformers/utils/dummy_torchaudio_objects.py b/src/transformers/utils/dummy_torchaudio_objects.py new file mode 100644 index 00000000000000..58b01f06a8ab5e --- /dev/null +++ b/src/transformers/utils/dummy_torchaudio_objects.py @@ -0,0 +1,16 @@ +# This file is autogenerated by the command `make fix-copies`, do not edit. +from ..utils import DummyObject, requires_backends + + +class MusicgenMelodyFeatureExtractor(metaclass=DummyObject): + _backends = ["torchaudio"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torchaudio"]) + + +class MusicgenMelodyProcessor(metaclass=DummyObject): + _backends = ["torchaudio"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torchaudio"]) diff --git a/tests/models/musicgen_melody/__init__.py b/tests/models/musicgen_melody/__init__.py new file mode 100644 index 00000000000000..e69de29bb2d1d6 diff --git a/tests/models/musicgen_melody/test_feature_extraction_musicgen_melody.py b/tests/models/musicgen_melody/test_feature_extraction_musicgen_melody.py new file mode 100644 index 00000000000000..697e3fb146ec17 --- /dev/null +++ b/tests/models/musicgen_melody/test_feature_extraction_musicgen_melody.py @@ -0,0 +1,231 @@ +# coding=utf-8 +# Copyright 2024 HuggingFace Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+# See the License for the specific language governing permissions and +# limitations under the License. + + +import itertools +import math +import os +import random +import tempfile +import unittest + +import numpy as np + +from transformers.testing_utils import ( + check_json_file_has_correct_format, + require_torch, + require_torchaudio, +) +from transformers.utils.import_utils import is_torchaudio_available + +from ...test_sequence_feature_extraction_common import SequenceFeatureExtractionTestMixin + + +if is_torchaudio_available(): + import torch + + from transformers import MusicgenMelodyFeatureExtractor + + +global_rng = random.Random() + + +# Copied from tests.models.whisper.test_feature_extraction_whisper.floats_list +def floats_list(shape, scale=1.0, rng=None, name=None): + """Creates a random float32 tensor""" + if rng is None: + rng = global_rng + + values = [] + for batch_idx in range(shape[0]): + values.append([]) + for _ in range(shape[1]): + values[-1].append(rng.random() * scale) + + return values + + +# Copied from tests.models.musicgen.test_modeling_musicgen.get_bip_bip +def get_bip_bip(bip_duration=0.125, duration=0.5, sample_rate=32000): + """Produces a series of 'bip bip' sounds at a given frequency.""" + timesteps = np.arange(int(duration * sample_rate)) / sample_rate + wav = np.cos(2 * math.pi * 440 * timesteps) + time_period = (timesteps % (2 * bip_duration)) / (2 * bip_duration) + envelope = time_period >= 0.5 + return wav * envelope + + +@require_torch +@require_torchaudio +class MusicgenMelodyFeatureExtractionTester(unittest.TestCase): + def __init__( + self, + parent, + batch_size=7, + min_seq_length=400, + max_seq_length=2000, + feature_size=12, + padding_value=0.0, + sampling_rate=4_000, + return_attention_mask=True, + ): + self.parent = parent + self.batch_size = batch_size + self.min_seq_length = min_seq_length + self.max_seq_length = max_seq_length + self.seq_length_diff = (self.max_seq_length - self.min_seq_length) // (self.batch_size - 1) + self.padding_value = padding_value + self.sampling_rate = sampling_rate + self.return_attention_mask = return_attention_mask + self.feature_size = feature_size + self.num_chroma = feature_size + + def prepare_feat_extract_dict(self): + return { + "feature_size": self.feature_size, + "padding_value": self.padding_value, + "sampling_rate": self.sampling_rate, + "return_attention_mask": self.return_attention_mask, + } + + # Copied from tests.models.whisper.test_feature_extraction_whisper.WhisperFeatureExtractionTester.prepare_inputs_for_common + def prepare_inputs_for_common(self, equal_length=False, numpify=False): + def _flatten(list_of_lists): + return list(itertools.chain(*list_of_lists)) + + if equal_length: + speech_inputs = [floats_list((self.max_seq_length, self.feature_size)) for _ in range(self.batch_size)] + else: + # make sure that inputs increase in size + speech_inputs = [ + floats_list((x, self.feature_size)) + for x in range(self.min_seq_length, self.max_seq_length, self.seq_length_diff) + ] + if numpify: + speech_inputs = [np.asarray(x) for x in speech_inputs] + return speech_inputs + + +@require_torchaudio +@require_torch +class MusicgenMelodyFeatureExtractionTest(SequenceFeatureExtractionTestMixin, unittest.TestCase): + feature_extraction_class = MusicgenMelodyFeatureExtractor if is_torchaudio_available() else None + + def setUp(self): + self.feat_extract_tester = MusicgenMelodyFeatureExtractionTester(self) + + # Copied from 
tests.models.seamless_m4t.test_feature_extraction_seamless_m4t.SeamlessM4TFeatureExtractionTest.test_feat_extract_from_and_save_pretrained + def test_feat_extract_from_and_save_pretrained(self): + feat_extract_first = self.feature_extraction_class(**self.feat_extract_dict) + + with tempfile.TemporaryDirectory() as tmpdirname: + saved_file = feat_extract_first.save_pretrained(tmpdirname)[0] + check_json_file_has_correct_format(saved_file) + feat_extract_second = self.feature_extraction_class.from_pretrained(tmpdirname) + + dict_first = feat_extract_first.to_dict() + dict_second = feat_extract_second.to_dict() + self.assertDictEqual(dict_first, dict_second) + + # Copied from tests.models.seamless_m4t.test_feature_extraction_seamless_m4t.SeamlessM4TFeatureExtractionTest.test_feat_extract_to_json_file + def test_feat_extract_to_json_file(self): + feat_extract_first = self.feature_extraction_class(**self.feat_extract_dict) + + with tempfile.TemporaryDirectory() as tmpdirname: + json_file_path = os.path.join(tmpdirname, "feat_extract.json") + feat_extract_first.to_json_file(json_file_path) + feat_extract_second = self.feature_extraction_class.from_json_file(json_file_path) + + dict_first = feat_extract_first.to_dict() + dict_second = feat_extract_second.to_dict() + self.assertEqual(dict_first, dict_second) + + def test_call(self): + # Tests that all call wrap to encode_plus and batch_encode_plus + feature_extractor = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict()) + # create three inputs of length 800, 1000, and 1200 + speech_inputs = [floats_list((1, x))[0] for x in range(800, 1400, 200)] + np_speech_inputs = [np.asarray(speech_input) for speech_input in speech_inputs] + + # Test feature size + input_features = feature_extractor(np_speech_inputs, padding=True, return_tensors="np").input_features + self.assertTrue(input_features.ndim == 3) + self.assertTrue(input_features.shape[0] == 3) + # Ignore copy + self.assertTrue(input_features.shape[-1] == feature_extractor.feature_size) + + # Test not batched input + encoded_sequences_1 = feature_extractor(speech_inputs[0], return_tensors="np").input_features + encoded_sequences_2 = feature_extractor(np_speech_inputs[0], return_tensors="np").input_features + self.assertTrue(np.allclose(encoded_sequences_1, encoded_sequences_2, atol=1e-3)) + + # Test batched + encoded_sequences_1 = feature_extractor(speech_inputs, return_tensors="np").input_features + encoded_sequences_2 = feature_extractor(np_speech_inputs, return_tensors="np").input_features + for enc_seq_1, enc_seq_2 in zip(encoded_sequences_1, encoded_sequences_2): + self.assertTrue(np.allclose(enc_seq_1, enc_seq_2, atol=1e-3)) + + # Test 2-D numpy arrays are batched. 
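+        # (Illustrative note, not part of the original test: np.asarray over the three length-800 lists
+        # below yields a (3, 800) float array, which should be interpreted as a batch of three mono
+        # waveforms rather than a single multi-channel input.)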
+ speech_inputs = [floats_list((1, x))[0] for x in (800, 800, 800)] + np_speech_inputs = np.asarray(speech_inputs) + encoded_sequences_1 = feature_extractor(speech_inputs, return_tensors="np").input_features + encoded_sequences_2 = feature_extractor(np_speech_inputs, return_tensors="np").input_features + for enc_seq_1, enc_seq_2 in zip(encoded_sequences_1, encoded_sequences_2): + self.assertTrue(np.allclose(enc_seq_1, enc_seq_2, atol=1e-3)) + + @require_torchaudio + def test_call_from_demucs(self): + # Tests that all call wrap to encode_plus and batch_encode_plus + feature_extractor = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict()) + + # (batch_size, num_stems, channel_size, audio_length) + inputs = torch.rand([4, 5, 2, 44000]) + + # Test feature size + input_features = feature_extractor(inputs, padding=True, return_tensors="np").input_features + self.assertTrue(input_features.ndim == 3) + self.assertTrue(input_features.shape[0] == 4) + self.assertTrue(input_features.shape[-1] == feature_extractor.feature_size) + + # Test single input + encoded_sequences_1 = feature_extractor(inputs[[0]], return_tensors="np").input_features + self.assertTrue(np.allclose(encoded_sequences_1[0], input_features[0], atol=1e-3)) + + # Copied from tests.models.whisper.test_feature_extraction_whisper.WhisperFeatureExtractionTest.test_double_precision_pad with input_features->input_features + def test_double_precision_pad(self): + import torch + + feature_extractor = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict()) + np_speech_inputs = np.random.rand(100, 32).astype(np.float64) + py_speech_inputs = np_speech_inputs.tolist() + + for inputs in [py_speech_inputs, np_speech_inputs]: + np_processed = feature_extractor.pad([{"input_features": inputs}], return_tensors="np") + self.assertTrue(np_processed.input_features.dtype == np.float32) + pt_processed = feature_extractor.pad([{"input_features": inputs}], return_tensors="pt") + self.assertTrue(pt_processed.input_features.dtype == torch.float32) + + def test_integration(self): + EXPECTED_INPUT_FEATURES = torch.zeros([2, 8, 12]) + EXPECTED_INPUT_FEATURES[0, :6, 9] = 1 + EXPECTED_INPUT_FEATURES[0, 6:, 0] = 1 + EXPECTED_INPUT_FEATURES[1, :, 9] = 1 + + input_speech = [get_bip_bip(duration=0.5), get_bip_bip(duration=1.0)] + feature_extractor = MusicgenMelodyFeatureExtractor() + input_features = feature_extractor(input_speech, return_tensors="pt").input_features + + self.assertEqual(input_features.shape, (2, 8, 12)) + self.assertTrue((input_features == EXPECTED_INPUT_FEATURES).all()) diff --git a/tests/models/musicgen_melody/test_modeling_musicgen_melody.py b/tests/models/musicgen_melody/test_modeling_musicgen_melody.py new file mode 100644 index 00000000000000..af10aa884633a1 --- /dev/null +++ b/tests/models/musicgen_melody/test_modeling_musicgen_melody.py @@ -0,0 +1,1449 @@ +# coding=utf-8 +# Copyright 2024, The HuggingFace Inc. team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+""" Testing suite for the PyTorch Musicgen Melody model. """ +import copy +import inspect +import math +import unittest + +import numpy as np + +from transformers import ( + EncodecConfig, + MusicgenMelodyConfig, + MusicgenMelodyDecoderConfig, + PretrainedConfig, + T5Config, +) +from transformers.testing_utils import ( + is_torch_available, + is_torchaudio_available, + require_torch, + require_torch_fp16, + require_torchaudio, + slow, + torch_device, +) +from transformers.utils import cached_property + +from ...generation.test_utils import GenerationTesterMixin +from ...test_configuration_common import ConfigTester +from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor +from ...test_pipeline_mixin import PipelineTesterMixin + + +if is_torch_available(): + import torch + + from transformers import ( + MusicgenMelodyForCausalLM, + MusicgenMelodyForConditionalGeneration, + MusicgenMelodyModel, + set_seed, + ) + from transformers.generation import ( + GenerateDecoderOnlyOutput, + InfNanRemoveLogitsProcessor, + LogitsProcessorList, + ) + +if is_torchaudio_available(): + from transformers import MusicgenMelodyProcessor + + +def _config_zero_init(config): + configs_no_init = copy.deepcopy(config) + for key in configs_no_init.__dict__.keys(): + if "_range" in key or "_std" in key or "initializer_factor" in key or "layer_scale" in key: + setattr(configs_no_init, key, 1e-10) + if isinstance(getattr(configs_no_init, key, None), PretrainedConfig): + no_init_subconfig = _config_zero_init(getattr(configs_no_init, key)) + setattr(configs_no_init, key, no_init_subconfig) + return configs_no_init + + +def prepare_musicgen_melody_decoder_inputs_dict( + config, + input_ids, + attention_mask=None, + head_mask=None, + encoder_hidden_states=None, + encoder_attention_mask=None, +): + if attention_mask is None: + attention_mask = input_ids.reshape(-1, config.num_codebooks, input_ids.shape[-1])[:, 0, :] + attention_mask = attention_mask.ne(config.pad_token_id) + if head_mask is None: + head_mask = torch.ones(config.num_hidden_layers, config.num_attention_heads, device=torch_device) + if encoder_attention_mask is None and encoder_hidden_states is not None: + encoder_attention_mask = torch.ones(encoder_hidden_states.shape[:2], device=torch_device) + return { + "input_ids": input_ids, + "attention_mask": attention_mask, + "encoder_hidden_states": encoder_hidden_states, + "encoder_attention_mask": encoder_attention_mask, + "head_mask": head_mask, + } + + +class MusicgenMelodyDecoderTester: + def __init__( + self, + parent, + batch_size=3, # need batch_size != num_hidden_layers because of #29297 + seq_length=7, + is_training=False, + use_labels=False, + vocab_size=99, + hidden_size=16, + num_hidden_layers=2, + num_attention_heads=4, + intermediate_size=4, + hidden_act="gelu", + hidden_dropout_prob=0.1, + attention_probs_dropout_prob=0.1, + max_position_embeddings=100, + pad_token_id=99, + bos_token_id=99, + num_codebooks=4, + conditional_seq_length=4, + ): + self.parent = parent + self.batch_size = batch_size + self.seq_length = seq_length + self.is_training = is_training + self.use_labels = use_labels + self.vocab_size = vocab_size + self.hidden_size = hidden_size + self.num_hidden_layers = num_hidden_layers + self.num_attention_heads = num_attention_heads + self.intermediate_size = intermediate_size + self.hidden_act = hidden_act + self.hidden_dropout_prob = hidden_dropout_prob + self.attention_probs_dropout_prob = attention_probs_dropout_prob + self.max_position_embeddings = 
max_position_embeddings + self.pad_token_id = pad_token_id + self.bos_token_id = bos_token_id + self.num_codebooks = num_codebooks + self.conditional_seq_length = conditional_seq_length + self.encoder_seq_length = conditional_seq_length + seq_length + + def prepare_config_and_inputs(self): + input_ids = ids_tensor([self.batch_size * self.num_codebooks, self.seq_length], self.vocab_size) + encoder_hidden_states = floats_tensor([self.batch_size, self.conditional_seq_length, self.hidden_size]) + + config = self.get_config() + inputs_dict = prepare_musicgen_melody_decoder_inputs_dict( + config, input_ids, encoder_hidden_states=encoder_hidden_states + ) + return config, inputs_dict + + def get_config(self): + config = MusicgenMelodyDecoderConfig( + vocab_size=self.vocab_size, + hidden_size=self.hidden_size, + num_hidden_layers=self.num_hidden_layers, + num_attention_heads=self.num_attention_heads, + d_ff=self.intermediate_size, + pad_token_id=self.pad_token_id, + decoder_start_token_id=self.bos_token_id, + bos_token_id=self.bos_token_id, + num_codebooks=self.num_codebooks, + tie_word_embeddings=False, + ) + return config + + def prepare_config_and_inputs_for_common(self): + config, inputs_dict = self.prepare_config_and_inputs() + return config, inputs_dict + + +@require_torch +class MusicgenMelodyDecoderTest(ModelTesterMixin, GenerationTesterMixin, unittest.TestCase): + all_model_classes = (MusicgenMelodyModel, MusicgenMelodyForCausalLM) if is_torch_available() else () + greedy_sample_model_classes = ( + (MusicgenMelodyForCausalLM,) if is_torch_available() else () + ) # the model uses a custom generation method so we only run a specific subset of the generation tests + test_pruning = False + test_resize_embeddings = False + + def setUp(self): + self.model_tester = MusicgenMelodyDecoderTester(self) + self.config_tester = ConfigTester(self, config_class=MusicgenMelodyDecoderConfig, hidden_size=16) + + def test_config(self): + self.config_tester.run_common_tests() + + # override since we have to compute the input embeddings over codebooks + def test_inputs_embeds(self): + config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() + + for model_class in self.all_model_classes: + model = model_class(config) + model.to(torch_device) + model.eval() + + inputs = copy.deepcopy(self._prepare_for_class(inputs_dict, model_class)) + + input_ids = inputs["input_ids"] + del inputs["input_ids"] + + embed_tokens = model.get_input_embeddings() + + input_ids = input_ids.reshape(-1, config.num_codebooks, input_ids.shape[-1]) + + inputs["inputs_embeds"] = sum( + [embed_tokens[codebook](input_ids[:, codebook]) for codebook in range(config.num_codebooks)] + ) + + with torch.no_grad(): + model(**inputs)[0] + + # override since we have embeddings / LM heads over multiple codebooks + def test_model_common_attributes(self): + config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() + + for model_class in self.all_model_classes: + model = model_class(config) + first_embed = model.get_input_embeddings()[0] + self.assertIsInstance(first_embed, torch.nn.Embedding) + lm_heads = model.get_output_embeddings() + self.assertTrue(lm_heads is None or isinstance(lm_heads[0], torch.nn.Linear)) + + @unittest.skip("this model doesn't support all arguments tested") + def test_model_outputs_equivalence(self): + pass + + @unittest.skip("this model has multiple inputs embeds and lm heads that should not be tied") + def test_tie_model_weights(self): + pass + + @unittest.skip("this model has multiple 
inputs embeds and lm heads that should not be tied") + def test_tied_weights_keys(self): + pass + + def _get_input_ids_and_config(self, batch_size=2): + config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() + input_ids = inputs_dict["input_ids"] + + # take max batch_size + sequence_length = input_ids.shape[-1] + input_ids = input_ids[: batch_size * config.num_codebooks, :] + + # generate max 3 tokens + max_length = input_ids.shape[-1] + 3 + attention_mask = torch.ones((batch_size, sequence_length), dtype=torch.long) + return config, input_ids, attention_mask, max_length + + @staticmethod + def _get_logits_processor_and_kwargs( + input_length, + eos_token_id, + forced_bos_token_id=None, + forced_eos_token_id=None, + max_length=None, + diversity_penalty=None, + ): + process_kwargs = { + "min_length": input_length + 1 if max_length is None else max_length - 1, + } + logits_processor = LogitsProcessorList() + return process_kwargs, logits_processor + + # override since we don't expect the outputs of `.generate` and `.greedy_search` to be the same, since we perform + # additional post-processing in the former + def test_greedy_generate_dict_outputs(self): + for model_class in self.greedy_sample_model_classes: + # disable cache + config, input_ids, attention_mask, max_length = self._get_input_ids_and_config() + config.use_cache = False + model = model_class(config).to(torch_device).eval() + output_greedy, output_generate = self._greedy_generate( + model=model, + input_ids=input_ids.to(torch_device), + attention_mask=attention_mask.to(torch_device), + max_length=max_length, + output_scores=True, + output_hidden_states=True, + output_attentions=True, + return_dict_in_generate=True, + ) + + self.assertIsInstance(output_greedy, GenerateDecoderOnlyOutput) + self.assertIsInstance(output_generate, GenerateDecoderOnlyOutput) + + self.assertNotIn(config.pad_token_id, output_generate) + + # override since we don't expect the outputs of `.generate` and `.greedy_search` to be the same, since we perform + # additional post-processing in the former + def test_greedy_generate_dict_outputs_use_cache(self): + for model_class in self.greedy_sample_model_classes: + # enable cache + config, input_ids, attention_mask, max_length = self._get_input_ids_and_config() + + config.use_cache = True + config.is_decoder = True + model = model_class(config).to(torch_device).eval() + output_greedy, output_generate = self._greedy_generate( + model=model, + input_ids=input_ids.to(torch_device), + attention_mask=attention_mask.to(torch_device), + max_length=max_length, + output_scores=True, + output_hidden_states=True, + output_attentions=True, + return_dict_in_generate=True, + ) + + self.assertIsInstance(output_greedy, GenerateDecoderOnlyOutput) + self.assertIsInstance(output_generate, GenerateDecoderOnlyOutput) + + # override since we don't expect the outputs of `.generate` and `.sample` to be the same, since we perform + # additional post-processing in the former + def test_sample_generate(self): + for model_class in self.greedy_sample_model_classes: + config, input_ids, attention_mask, max_length = self._get_input_ids_and_config() + model = model_class(config).to(torch_device).eval() + + process_kwargs, logits_processor = self._get_logits_processor_and_kwargs( + input_ids.shape[-1], + model.config.eos_token_id, + forced_bos_token_id=model.config.forced_bos_token_id, + forced_eos_token_id=model.config.forced_eos_token_id, + max_length=max_length, + ) + logits_warper_kwargs, logits_warper = 
self._get_warper_and_kwargs(num_beams=2) + + # check `generate()` and `sample()` are equal + output_sample, output_generate = self._sample_generate( + model=model, + input_ids=input_ids.to(torch_device), + attention_mask=attention_mask.to(torch_device), + max_length=max_length, + num_return_sequences=3, + logits_processor=logits_processor, + logits_warper=logits_warper, + logits_warper_kwargs=logits_warper_kwargs, + process_kwargs=process_kwargs, + ) + self.assertIsInstance(output_sample, torch.Tensor) + self.assertIsInstance(output_generate, torch.Tensor) + + # override since we don't expect the outputs of `.generate` and `.sample` to be the same, since we perform + # additional post-processing in the former + def test_sample_generate_dict_output(self): + for model_class in self.greedy_sample_model_classes: + # disable cache + config, input_ids, attention_mask, max_length = self._get_input_ids_and_config() + config.use_cache = False + model = model_class(config).to(torch_device).eval() + + process_kwargs, logits_processor = self._get_logits_processor_and_kwargs( + input_ids.shape[-1], + model.config.eos_token_id, + forced_bos_token_id=model.config.forced_bos_token_id, + forced_eos_token_id=model.config.forced_eos_token_id, + max_length=max_length, + ) + logits_warper_kwargs, logits_warper = self._get_warper_and_kwargs(num_beams=1) + + output_sample, output_generate = self._sample_generate( + model=model, + input_ids=input_ids.to(torch_device), + attention_mask=attention_mask.to(torch_device), + max_length=max_length, + num_return_sequences=1, + logits_processor=logits_processor, + logits_warper=logits_warper, + logits_warper_kwargs=logits_warper_kwargs, + process_kwargs=process_kwargs, + output_scores=True, + output_hidden_states=True, + output_attentions=True, + return_dict_in_generate=True, + ) + + self.assertIsInstance(output_sample, GenerateDecoderOnlyOutput) + self.assertIsInstance(output_generate, GenerateDecoderOnlyOutput) + + def test_greedy_generate_stereo_outputs(self): + for model_class in self.greedy_sample_model_classes: + config, input_ids, attention_mask, max_length = self._get_input_ids_and_config() + config.audio_channels = 2 + model = model_class(config).to(torch_device).eval() + output_greedy, output_generate = self._greedy_generate( + model=model, + input_ids=input_ids.to(torch_device), + attention_mask=attention_mask.to(torch_device), + max_length=max_length, + output_scores=True, + output_hidden_states=True, + output_attentions=True, + return_dict_in_generate=True, + ) + + self.assertIsInstance(output_greedy, GenerateDecoderOnlyOutput) + self.assertIsInstance(output_generate, GenerateDecoderOnlyOutput) + + self.assertNotIn(config.pad_token_id, output_generate) + + +def prepare_musicgen_melody_inputs_dict( + config, + input_ids, + decoder_input_ids, + attention_mask=None, + decoder_attention_mask=None, + head_mask=None, + decoder_head_mask=None, +): + if decoder_attention_mask is None: + decoder_attention_mask = decoder_input_ids.reshape( + -1, config.decoder.num_codebooks, decoder_input_ids.shape[-1] + )[:, 0, :] + decoder_attention_mask = decoder_attention_mask.ne(config.decoder.pad_token_id) + if head_mask is None: + head_mask = torch.ones( + config.text_encoder.num_hidden_layers, config.text_encoder.num_attention_heads, device=torch_device + ) + if decoder_head_mask is None: + decoder_head_mask = torch.ones( + config.decoder.num_hidden_layers, config.decoder.num_attention_heads, device=torch_device + ) + return { + "input_ids": input_ids, + "attention_mask": 
attention_mask, + "decoder_input_ids": decoder_input_ids, + "decoder_attention_mask": decoder_attention_mask, + "head_mask": head_mask, + "decoder_head_mask": decoder_head_mask, + } + + +class MusicgenMelodyTester: + def __init__( + self, + parent, + batch_size=3, # need batch_size != num_hidden_layers because of #29297 + seq_length=7, + is_training=False, + use_labels=False, + vocab_size=99, + hidden_size=16, + num_hidden_layers=2, + num_attention_heads=4, + intermediate_size=4, + hidden_act="gelu", + hidden_dropout_prob=0.1, + attention_probs_dropout_prob=0.1, + max_position_embeddings=100, + pad_token_id=99, + bos_token_id=99, + num_codebooks=4, + num_filters=4, + codebook_size=128, + conditional_seq_length=3, + chroma_length=24, + ): + self.parent = parent + self.batch_size = batch_size + self.seq_length = seq_length + self.is_training = is_training + self.use_labels = use_labels + self.vocab_size = vocab_size + self.hidden_size = hidden_size + self.num_hidden_layers = num_hidden_layers + self.num_attention_heads = num_attention_heads + self.intermediate_size = intermediate_size + self.hidden_act = hidden_act + self.hidden_dropout_prob = hidden_dropout_prob + self.attention_probs_dropout_prob = attention_probs_dropout_prob + self.max_position_embeddings = max_position_embeddings + self.pad_token_id = pad_token_id + self.bos_token_id = bos_token_id + self.num_codebooks = num_codebooks + self.num_filters = num_filters + self.codebook_size = codebook_size + self.conditional_seq_length = conditional_seq_length + self.chroma_length = chroma_length + self.encoder_seq_length = conditional_seq_length + seq_length + + def prepare_config_and_inputs(self): + input_ids = ids_tensor([self.batch_size, self.conditional_seq_length], self.vocab_size) + decoder_input_ids = ids_tensor([self.batch_size * self.num_codebooks, self.seq_length], self.vocab_size) + + config = self.get_config() + inputs_dict = prepare_musicgen_melody_inputs_dict(config, input_ids, decoder_input_ids=decoder_input_ids) + return config, inputs_dict + + def get_config(self): + text_encoder_config = T5Config( + vocab_size=self.vocab_size, + d_model=self.hidden_size, + d_ff=self.intermediate_size, + num_layers=self.num_hidden_layers, + num_heads=self.num_attention_heads, + ) + audio_encoder_config = EncodecConfig( + hidden_size=self.vocab_size, + compress=1, + num_filters=self.num_filters, + codebook_size=self.codebook_size, + codebook_dim=self.vocab_size, + ) + decoder_config = MusicgenMelodyDecoderConfig( + vocab_size=self.vocab_size, + hidden_size=self.hidden_size, + num_hidden_layers=self.num_hidden_layers, + num_attention_heads=self.num_attention_heads, + ffn_dim=self.intermediate_size, + pad_token_id=self.pad_token_id, + decoder_start_token_id=self.bos_token_id, + bos_token_id=self.bos_token_id, + num_codebooks=self.num_codebooks, + tie_word_embeddings=False, + ) + config = MusicgenMelodyConfig.from_sub_models_config( + text_encoder_config, audio_encoder_config, decoder_config, chroma_length=self.chroma_length + ) + return config + + def prepare_config_and_inputs_for_common(self): + config, inputs_dict = self.prepare_config_and_inputs() + return config, inputs_dict + + +@require_torch +# Copied from tests.models.musicgen.test_modeling_musicgen.MusicgenTest with Musicgen->MusicgenMelody, musicgen->musicgen_melody, EncoderDecoder->DecoderOnly, input_values->input_features +class MusicgenMelodyTest(ModelTesterMixin, GenerationTesterMixin, PipelineTesterMixin, unittest.TestCase): + all_model_classes = 
(MusicgenMelodyForConditionalGeneration,) if is_torch_available() else () + greedy_sample_model_classes = (MusicgenMelodyForConditionalGeneration,) if is_torch_available() else () + pipeline_model_mapping = {"text-to-audio": MusicgenMelodyForConditionalGeneration} if is_torch_available() else {} + test_pruning = False # training is not supported yet for MusicGen + test_headmasking = False + test_resize_embeddings = False + # not to test torchscript as the model tester doesn't prepare `input_features` and `padding_mask` + # (and `torchscript` hates `None` values). + test_torchscript = False + + def setUp(self): + self.model_tester = MusicgenMelodyTester(self) + + # Ignore copy + def _check_output_with_attentions(self, outputs, config, input_ids, decoder_input_ids): + decoder_config = config.decoder + + decoder_attentions = outputs["attentions"] + num_decoder_layers = decoder_config.num_hidden_layers + self.assertEqual(len(decoder_attentions), num_decoder_layers) + + output_shape = decoder_input_ids.shape[-1] + input_ids.shape[-1] + self.model_tester.chroma_length + self.assertEqual( + decoder_attentions[0].shape[-3:], + (decoder_config.num_attention_heads, output_shape, output_shape), + ) + + def check_musicgen_melody_model_output_attentions( + self, + model_class, + config, + input_ids, + attention_mask, + decoder_input_ids, + decoder_attention_mask, + **kwargs, + ): + model = model_class(config) + model.to(torch_device) + model.eval() + + with torch.no_grad(): + outputs = model( + input_ids=input_ids, + decoder_input_ids=decoder_input_ids, + attention_mask=attention_mask, + decoder_attention_mask=decoder_attention_mask, + output_attentions=True, + **kwargs, + ) + self._check_output_with_attentions(outputs, config, input_ids, decoder_input_ids) + + # Ignore copy + def check_musicgen_melody_model_output_attentions_from_config( + self, + model_class, + config, + input_ids, + attention_mask, + decoder_input_ids, + decoder_attention_mask, + **kwargs, + ): + # Similar to `check_musicgen_melody_model_output_attentions`, but with `output_attentions` triggered from the + # config file. Contrarily to most models, changing the model's config won't work -- the defaults are loaded + # from the inner models' configurations. 
+ config.output_attentions = True # model config -> won't work + + model = model_class(config) + model.to(torch_device) + model.eval() + + with torch.no_grad(): + outputs = model( + input_ids=input_ids, + decoder_input_ids=decoder_input_ids, + attention_mask=attention_mask, + decoder_attention_mask=decoder_attention_mask, + **kwargs, + ) + self.assertTrue(all(key not in outputs for key in ["encoder_attentions", "decoder_attentions"])) + config.text_encoder.output_attentions = True # inner model config -> will work + config.audio_encoder.output_attentions = True + config.decoder.output_attentions = True + + model = model_class(config) + model.to(torch_device) + model.eval() + + with torch.no_grad(): + outputs = model( + input_ids=input_ids, + decoder_input_ids=decoder_input_ids, + attention_mask=attention_mask, + decoder_attention_mask=decoder_attention_mask, + **kwargs, + ) + self._check_output_with_attentions(outputs, config, input_ids, decoder_input_ids) + + # override since changing `output_attentions` from the top-level model config won't work + def test_attention_outputs(self): + config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() + + for model_class in self.all_model_classes: + self.check_musicgen_melody_model_output_attentions(model_class, config, **inputs_dict) + self.check_musicgen_melody_model_output_attentions_from_config(model_class, config, **inputs_dict) + + # override since we have a specific forward signature for musicgen_melody + # Ignore copy + def test_forward_signature(self): + config, _ = self.model_tester.prepare_config_and_inputs_for_common() + + for model_class in self.all_model_classes: + model = model_class(config) + signature = inspect.signature(model.forward) + # signature.parameters is an OrderedDict => so arg_names order is deterministic + arg_names = [*signature.parameters.keys()] + + expected_arg_names = [ + "input_ids", + "attention_mask", + "input_features", + "decoder_input_ids", + "decoder_attention_mask", + ] + if "head_mask" in arg_names and "decoder_head_mask" in arg_names: + expected_arg_names.extend(["head_mask", "decoder_head_mask"]) + + self.assertListEqual(arg_names[: len(expected_arg_names)], expected_arg_names) + + # override since changing `gradient_checkpointing` from the top-level model config won't work + def test_gradient_checkpointing_backward_compatibility(self): + config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() + + for model_class in self.all_model_classes: + if not model_class.supports_gradient_checkpointing: + continue + + config.text_encoder.gradient_checkpointing = True + config.audio_encoder.gradient_checkpointing = True + config.decoder.gradient_checkpointing = True + model = model_class(config) + self.assertTrue(model.is_gradient_checkpointing) + + # skip as this model has multiple inputs embeds and lm heads that should not be tied + def test_tie_model_weights(self): + pass + + # skip as this model has multiple inputs embeds and lm heads that should not be tied + def test_tied_model_weights_key_ignore(self): + pass + + # skip as this model has multiple inputs embeds and lm heads that should not be tied + def test_tied_weights_keys(self): + pass + + # override since changing `output_hidden_states` / `output_attentions` from the top-level model config won't work + # Ignore copy + def test_retain_grad_hidden_states_attentions(self): + config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() + config.text_encoder.output_hidden_states = True +
config.audio_encoder.output_hidden_states = True + config.decoder.output_hidden_states = True + + config.text_encoder.output_attentions = True + config.decoder.output_attentions = True + + # no need to test all models as different heads yield the same functionality + model_class = self.all_model_classes[0] + model = model_class(config) + model.to(torch_device) + + inputs = self._prepare_for_class(inputs_dict, model_class) + + outputs = model(**inputs) + + output = outputs[0] + + encoder_hidden_states = outputs.encoder_hidden_states + encoder_hidden_states.retain_grad() + + decoder_hidden_states = outputs.hidden_states[0] + decoder_hidden_states.retain_grad() + + if self.has_attentions: + decoder_attentions = outputs.attentions[0] + decoder_attentions.retain_grad() + + output.flatten()[0].backward(retain_graph=True) + + self.assertIsNotNone(encoder_hidden_states.grad) + self.assertIsNotNone(decoder_hidden_states.grad) + + if self.has_attentions: + self.assertIsNotNone(decoder_attentions.grad) + + # override since changing `output_hidden_states` from the top-level model config won't work + def test_hidden_states_output(self): + def check_hidden_states_output(inputs_dict, config, model_class): + model = model_class(config) + model.to(torch_device) + model.eval() + + with torch.no_grad(): + outputs = model(**self._prepare_for_class(inputs_dict, model_class)) + + hidden_states = outputs.encoder_hidden_states + + expected_num_layers = self.model_tester.num_hidden_layers + 1 + self.assertEqual(len(hidden_states), expected_num_layers) + + # Ignore copy + seq_length = self.model_tester.conditional_seq_length + self.model_tester.chroma_length + self.assertListEqual( + list(hidden_states[0].shape[-2:]), + [seq_length, self.model_tester.hidden_size], + ) + + # Ignore copy + seq_length = self.model_tester.encoder_seq_length + self.model_tester.chroma_length + # Ignore copy + expected_num_layers = self.model_tester.num_hidden_layers + 1 + # Ignore copy + hidden_states = outputs.hidden_states + self.assertIsInstance(hidden_states, (list, tuple)) + self.assertEqual(len(hidden_states), expected_num_layers) + + self.assertListEqual( + list(hidden_states[0].shape[-2:]), + [seq_length, self.model_tester.hidden_size], + ) + + config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() + + for model_class in self.all_model_classes: + inputs_dict["output_hidden_states"] = True + check_hidden_states_output(inputs_dict, config, model_class) + + # check that output_hidden_states also work using config + del inputs_dict["output_hidden_states"] + config.text_encoder.output_hidden_states = True + config.audio_encoder.output_hidden_states = True + config.decoder.output_hidden_states = True + + check_hidden_states_output(inputs_dict, config, model_class) + + # override since the conv layers and lstm's in encodec are exceptions + def test_initialization(self): + config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() + + configs_no_init = _config_zero_init(config) + for model_class in self.all_model_classes: + model = model_class(config=configs_no_init) + for name, param in model.named_parameters(): + uniform_init_parms = ["conv"] + ignore_init = ["lstm"] + if param.requires_grad: + if any(x in name for x in uniform_init_parms): + self.assertTrue( + -1.0 <= ((param.data.mean() * 1e9).round() / 1e9).item() <= 1.0, + msg=f"Parameter {name} of model {model_class} seems not properly initialized", + ) + elif not any(x in name for x in ignore_init): + self.assertIn( + 
((param.data.mean() * 1e9).round() / 1e9).item(), + [0.0, 1.0], + msg=f"Parameter {name} of model {model_class} seems not properly initialized", + ) + + # override since we have embeddings / LM heads over multiple codebooks + def test_model_common_attributes(self): + config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() + + for model_class in self.all_model_classes: + model = model_class(config) + self.assertIsInstance(model.get_input_embeddings(), torch.nn.Embedding) + lm_heads = model.get_output_embeddings() + self.assertTrue(lm_heads is None or isinstance(lm_heads[0], torch.nn.Linear)) + + def _get_input_ids_and_config(self, batch_size=2): + config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() + input_ids = inputs_dict["input_ids"] + + # take max batch_size + sequence_length = input_ids.shape[-1] + input_ids = input_ids[:batch_size, :] + attention_mask = torch.ones((batch_size, sequence_length), dtype=torch.long) + + # generate max 3 tokens + decoder_input_ids = inputs_dict["decoder_input_ids"] + max_length = decoder_input_ids.shape[-1] + 3 + decoder_input_ids = decoder_input_ids[: batch_size * config.decoder.num_codebooks, :] + return config, input_ids, attention_mask, decoder_input_ids, max_length + + # override since the `input_ids` cannot be used as the `decoder_input_ids` for musicgen_melody (input / outputs are + # different modalities -> different shapes) + def _greedy_generate( + self, + model, + input_ids, + attention_mask, + decoder_input_ids, + max_length, + output_scores=False, + output_attentions=False, + output_hidden_states=False, + return_dict_in_generate=False, + ): + logits_process_kwargs, logits_processor = self._get_logits_processor_and_kwargs( + input_ids.shape[-1], + eos_token_id=model.config.eos_token_id, + forced_bos_token_id=model.config.forced_bos_token_id, + forced_eos_token_id=model.config.forced_eos_token_id, + max_length=max_length, + ) + + model_kwargs = {"attention_mask": attention_mask} if attention_mask is not None else {} + output_generate = model.generate( + input_ids, + do_sample=False, + num_beams=1, + max_length=max_length, + output_attentions=output_attentions, + output_hidden_states=output_hidden_states, + output_scores=output_scores, + return_dict_in_generate=return_dict_in_generate, + remove_invalid_values=True, + **logits_process_kwargs, + **model_kwargs, + ) + + with torch.no_grad(): + model_kwargs = {"attention_mask": attention_mask} if attention_mask is not None else {} + output_greedy = model.greedy_search( + decoder_input_ids, + max_length=max_length, + logits_processor=logits_processor, + output_attentions=output_attentions, + output_hidden_states=output_hidden_states, + output_scores=output_scores, + return_dict_in_generate=return_dict_in_generate, + # Ignore copy + **model_kwargs, + ) + return output_greedy, output_generate + + # override since the `input_ids` cannot be used as the `decoder_input_ids` for musicgen_melody (input / outputs are + # different modalities -> different shapes) + # Ignore copy + def _sample_generate( + self, + model, + input_ids, + attention_mask, + decoder_input_ids, + max_length, + num_return_sequences, + logits_processor, + logits_warper, + logits_warper_kwargs, + process_kwargs, + output_scores=False, + output_attentions=False, + output_hidden_states=False, + return_dict_in_generate=False, + ): + torch.manual_seed(0) + model_kwargs = {"attention_mask": attention_mask} if attention_mask is not None else {} + output_generate = model.generate( + input_ids, + 
do_sample=True, + num_beams=1, + max_length=max_length, + num_return_sequences=num_return_sequences, + output_scores=output_scores, + output_attentions=output_attentions, + output_hidden_states=output_hidden_states, + return_dict_in_generate=return_dict_in_generate, + remove_invalid_values=True, + **logits_warper_kwargs, + **process_kwargs, + **model_kwargs, + ) + + torch.manual_seed(0) + + # prevent flaky generation test failures + logits_processor.append(InfNanRemoveLogitsProcessor()) + + with torch.no_grad(): + model_kwargs = {"attention_mask": attention_mask} if attention_mask is not None else {} + output_sample = model.sample( + decoder_input_ids.repeat_interleave(num_return_sequences, dim=0), + max_length=max_length, + logits_processor=logits_processor, + logits_warper=logits_warper, + output_scores=output_scores, + output_attentions=output_attentions, + output_hidden_states=output_hidden_states, + return_dict_in_generate=return_dict_in_generate, + **model_kwargs, + ) + + return output_sample, output_generate + + @staticmethod + def _get_logits_processor_and_kwargs( + input_length, + eos_token_id, + forced_bos_token_id=None, + forced_eos_token_id=None, + max_length=None, + diversity_penalty=None, + ): + process_kwargs = { + "min_length": input_length + 1 if max_length is None else max_length - 1, + } + logits_processor = LogitsProcessorList() + return process_kwargs, logits_processor + + def test_greedy_generate_dict_outputs(self): + for model_class in self.greedy_sample_model_classes: + # disable cache + config, input_ids, attention_mask, decoder_input_ids, max_length = self._get_input_ids_and_config() + config.use_cache = False + model = model_class(config).to(torch_device).eval() + output_greedy, output_generate = self._greedy_generate( + model=model, + input_ids=input_ids.to(torch_device), + attention_mask=attention_mask.to(torch_device), + decoder_input_ids=decoder_input_ids, + max_length=max_length, + output_scores=True, + output_hidden_states=True, + output_attentions=True, + return_dict_in_generate=True, + ) + + self.assertIsInstance(output_greedy, GenerateDecoderOnlyOutput) + self.assertIsInstance(output_generate, GenerateDecoderOnlyOutput) + + self.assertNotIn(config.pad_token_id, output_generate) + + def test_greedy_generate_dict_outputs_use_cache(self): + for model_class in self.greedy_sample_model_classes: + # enable cache + config, input_ids, attention_mask, decoder_input_ids, max_length = self._get_input_ids_and_config() + + config.use_cache = True + config.is_decoder = True + model = model_class(config).to(torch_device).eval() + output_greedy, output_generate = self._greedy_generate( + model=model, + input_ids=input_ids.to(torch_device), + attention_mask=attention_mask.to(torch_device), + decoder_input_ids=decoder_input_ids, + max_length=max_length, + output_scores=True, + output_hidden_states=True, + output_attentions=True, + return_dict_in_generate=True, + ) + + self.assertIsInstance(output_greedy, GenerateDecoderOnlyOutput) + self.assertIsInstance(output_generate, GenerateDecoderOnlyOutput) + + def test_sample_generate(self): + for model_class in self.greedy_sample_model_classes: + config, input_ids, attention_mask, decoder_input_ids, max_length = self._get_input_ids_and_config() + model = model_class(config).to(torch_device).eval() + + process_kwargs, logits_processor = self._get_logits_processor_and_kwargs( + input_ids.shape[-1], + model.config.eos_token_id, + forced_bos_token_id=model.config.forced_bos_token_id, + 
forced_eos_token_id=model.config.forced_eos_token_id, + max_length=max_length, + ) + logits_warper_kwargs, logits_warper = self._get_warper_and_kwargs(num_beams=2) + + # check `generate()` and `sample()` are equal + output_sample, output_generate = self._sample_generate( + model=model, + input_ids=input_ids.to(torch_device), + attention_mask=attention_mask.to(torch_device), + decoder_input_ids=decoder_input_ids, + max_length=max_length, + num_return_sequences=1, + logits_processor=logits_processor, + logits_warper=logits_warper, + logits_warper_kwargs=logits_warper_kwargs, + process_kwargs=process_kwargs, + ) + self.assertIsInstance(output_sample, torch.Tensor) + self.assertIsInstance(output_generate, torch.Tensor) + + def test_sample_generate_dict_output(self): + for model_class in self.greedy_sample_model_classes: + # disable cache + config, input_ids, attention_mask, decoder_input_ids, max_length = self._get_input_ids_and_config() + config.use_cache = False + model = model_class(config).to(torch_device).eval() + + process_kwargs, logits_processor = self._get_logits_processor_and_kwargs( + input_ids.shape[-1], + model.config.eos_token_id, + forced_bos_token_id=model.config.forced_bos_token_id, + forced_eos_token_id=model.config.forced_eos_token_id, + max_length=max_length, + ) + logits_warper_kwargs, logits_warper = self._get_warper_and_kwargs(num_beams=1) + + output_sample, output_generate = self._sample_generate( + model=model, + input_ids=input_ids.to(torch_device), + attention_mask=attention_mask.to(torch_device), + decoder_input_ids=decoder_input_ids, + max_length=max_length, + num_return_sequences=3, + logits_processor=logits_processor, + logits_warper=logits_warper, + logits_warper_kwargs=logits_warper_kwargs, + process_kwargs=process_kwargs, + output_scores=True, + output_hidden_states=True, + output_attentions=True, + return_dict_in_generate=True, + ) + + self.assertIsInstance(output_sample, GenerateDecoderOnlyOutput) + self.assertIsInstance(output_generate, GenerateDecoderOnlyOutput) + + def test_generate_without_input_ids(self): + config, _, _, _, max_length = self._get_input_ids_and_config() + + # if no bos token id => cannot generate from None + if config.bos_token_id is None: + return + + for model_class in self.greedy_sample_model_classes: + model = model_class(config).to(torch_device) + model.eval() + + output_ids_generate = model.generate(do_sample=False, max_length=max_length, remove_invalid_values=True) + self.assertIsNotNone(output_ids_generate) + + @require_torch_fp16 + def test_generate_fp16(self): + config, input_dict = self.model_tester.prepare_config_and_inputs() + + for model_class in self.greedy_sample_model_classes: + model = model_class(config).eval().to(torch_device) + model.half() + # greedy + model.generate(input_dict["input_ids"], attention_mask=input_dict["attention_mask"], max_new_tokens=10) + # sampling + model.generate( + input_dict["input_ids"], attention_mask=input_dict["attention_mask"], do_sample=True, max_new_tokens=10 + ) + + def test_greedy_generate_stereo_outputs(self): + for model_class in self.greedy_sample_model_classes: + config, input_ids, attention_mask, decoder_input_ids, max_length = self._get_input_ids_and_config() + config.audio_channels = 2 + + model = model_class(config).to(torch_device).eval() + output_greedy, output_generate = self._greedy_generate( + model=model, + input_ids=input_ids.to(torch_device), + attention_mask=attention_mask.to(torch_device), + decoder_input_ids=decoder_input_ids, + max_length=max_length, + 
output_scores=True, + output_hidden_states=True, + output_attentions=True, + return_dict_in_generate=True, + ) + + self.assertIsInstance(output_greedy, GenerateDecoderOnlyOutput) + self.assertIsInstance(output_generate, GenerateDecoderOnlyOutput) + + self.assertNotIn(config.pad_token_id, output_generate) + + +# Copied from tests.models.musicgen.test_modeling_musicgen.get_bip_bip +def get_bip_bip(bip_duration=0.125, duration=0.5, sample_rate=32000): + """Produces a series of 'bip bip' sounds at a given frequency.""" + timesteps = np.arange(int(duration * sample_rate)) / sample_rate + wav = np.cos(2 * math.pi * 440 * timesteps) + time_period = (timesteps % (2 * bip_duration)) / (2 * bip_duration) + envelope = time_period >= 0.5 + return wav * envelope + + +@require_torch +@require_torchaudio +class MusicgenMelodyIntegrationTests(unittest.TestCase): + @cached_property + def model(self): + return MusicgenMelodyForConditionalGeneration.from_pretrained("ylacombe/musicgen-melody").to(torch_device) + + @cached_property + def processor(self): + return MusicgenMelodyProcessor.from_pretrained("ylacombe/musicgen-melody") + + @slow + def test_logits_text_prompt(self): + model = self.model + processor = self.processor + + inputs = processor(text=["80s music", "Club techno"], padding=True, return_tensors="pt") + + # prepare the encoder inputs + input_ids = inputs.input_ids.to(torch_device) + attention_mask = inputs.attention_mask.to(torch_device) + + # prepare the decoder inputs + pad_token_id = model.generation_config.pad_token_id + decoder_input_ids = ( + torch.ones((input_ids.shape[0] * model.decoder.num_codebooks, 1), dtype=torch.long).to(torch_device) + * pad_token_id + ) + + with torch.no_grad(): + logits = model( + input_ids, + attention_mask=attention_mask, + decoder_input_ids=decoder_input_ids, + ).logits + + # fmt: off + EXPECTED_LOGITS = torch.tensor([ + 1.1100, -2.1065, -3.7699, -0.7102, 1.3707, -1.7028, -2.6802, -6.0367, + 1.0504, -2.5358, -4.3497, 0.7338, 0.4823, -2.5260, 1.2717, 1.5427 + ]) + # fmt: on + EXPECTED_OUTPUT_LENGTH = input_ids.shape[1] + 1 + self.model.config.chroma_length + + logits_shape = ( + input_ids.shape[0] * model.decoder.num_codebooks, + EXPECTED_OUTPUT_LENGTH, + model.decoder.config.vocab_size, + ) + + self.assertTrue(logits.shape == logits_shape) + self.assertTrue(torch.allclose(logits[0, -1, :16].cpu(), EXPECTED_LOGITS, atol=1e-4)) + + @slow + def test_logits_text_audio_prompt(self): + model = self.model + processor = self.processor + + audio = [get_bip_bip(duration=0.5), get_bip_bip(duration=1.0)] + text = ["80s music", "Club techno"] + + inputs = processor(audio=audio, text=text, padding=True, return_tensors="pt") + + # prepare the text encoder inputs + input_ids = inputs.input_ids.to(torch_device) + attention_mask = inputs.attention_mask.to(torch_device) + + # prepare the audio encoder inputs + input_features = inputs.input_features.to(torch_device) + + # prepare the decoder inputs + pad_token_id = model.generation_config.pad_token_id + decoder_input_ids = ( + torch.ones((input_ids.shape[0] * model.decoder.num_codebooks, 1), dtype=torch.long).to(torch_device) + * pad_token_id + ) + + with torch.no_grad(): + logits = model( + input_ids, + attention_mask=attention_mask, + input_features=input_features, + decoder_input_ids=decoder_input_ids, + ).logits + + # fmt: off + EXPECTED_LOGITS = torch.tensor([ + [ 0.7479, 0.3742, 0.6253, -7.9405, 0.7105, -6.9995, 0.7792, -3.0482], + [-2.7905, 0.7492, -0.2556, -8.1586, -1.6740, 0.5771, -8.3650, -0.0908] + ]) + # fmt: on + + 
self.assertTrue(logits.shape == (8, 240, 2048)) + self.assertTrue(torch.allclose(logits[1:3, -1, 32:40].cpu(), EXPECTED_LOGITS, atol=1e-4)) + + @slow + def test_generate_unconditional_greedy(self): + model = self.model + + # only generate 1 sample with greedy - since it's deterministic all elements of the batch will be the same + unconditional_inputs = self.processor.get_unconditional_inputs(num_samples=1).to(torch_device) + + output_values = model.generate(**unconditional_inputs, do_sample=False, max_new_tokens=10, guidance_scale=1.0) + + # fmt: off + EXPECTED_VALUES = torch.tensor( + [ + 1.2741e-04, -8.0466e-05, 5.5789e-04, 1.0402e-03, 2.6547e-04, + 1.5587e-05, -1.4210e-04, -9.7303e-05, 6.4504e-04, 5.0903e-04, + 9.6474e-04, 1.0498e-03, 3.7210e-05, -5.3652e-04, -3.6579e-04, -2.5678e-04 + ] + ) + # fmt: on + + self.assertTrue(output_values.shape == (1, 1, 4480)) + self.assertTrue(torch.allclose(output_values[0, 0, :16].cpu(), EXPECTED_VALUES, atol=1e-4)) + + @slow + def test_generate_unconditional_sampling(self): + model = self.model + + # for stochastic sampling we can generate multiple outputs + unconditional_inputs = self.processor.get_unconditional_inputs(num_samples=2).to(torch_device) + + set_seed(0) + + output_values = model.generate( + **unconditional_inputs, do_sample=True, max_new_tokens=10, guidance_scale=1.0, temperature=1.0, top_k=250 + ) + + # fmt: off + EXPECTED_VALUES = torch.tensor( + [ + -0.0085, -0.0160, 0.0028, 0.0005, -0.0095, 0.0028, -0.0122, -0.0299, + -0.0052, -0.0145, 0.0092, 0.0063, -0.0378, -0.0621, -0.0784, -0.0120, + ] + ) + # fmt: on + + self.assertTrue(output_values.shape == (2, 1, 4480)) + self.assertTrue(torch.allclose(output_values[0, 0, :16].cpu(), EXPECTED_VALUES, atol=1e-4)) + + @slow + def test_generate_text_prompt_greedy(self): + model = self.model + processor = self.processor + + inputs = processor(text=["80s music", "Club techno"], padding=True, return_tensors="pt") + + # prepare the encoder inputs + input_ids = inputs.input_ids.to(torch_device) + attention_mask = inputs.attention_mask.to(torch_device) + + output_values = model.generate( + input_ids, attention_mask=attention_mask, do_sample=False, guidance_scale=None, max_new_tokens=10 + ) + + # fmt: off + EXPECTED_VALUES = torch.tensor( + [ + 1.2741e-04, -8.0474e-05, 5.5789e-04, 1.0402e-03, 2.6547e-04, + 1.5597e-05, -1.4210e-04, -9.7309e-05, 6.4504e-04, 5.0903e-04 + ] + ) + # fmt: on + + self.assertTrue(output_values.shape == (2, 1, 4480)) + self.assertTrue(torch.allclose(output_values[0, 0, :10].cpu(), EXPECTED_VALUES, atol=1e-4)) + + @slow + def test_generate_text_prompt_greedy_with_classifier_free_guidance(self): + model = self.model + processor = self.processor + + inputs = processor(text=["80s music", "Club techno"], padding=True, return_tensors="pt") + + # prepare the encoder inputs + input_ids = inputs.input_ids.to(torch_device) + attention_mask = inputs.attention_mask.to(torch_device) + + output_values = model.generate( + input_ids, attention_mask=attention_mask, do_sample=False, guidance_scale=3, max_new_tokens=10 + ) + + # fmt: off + EXPECTED_VALUES = torch.tensor( + [ + 1.2741e-04, -8.0474e-05, 5.5789e-04, 1.0402e-03, 2.6547e-04, + 1.5597e-05, -1.4210e-04, -9.7309e-05, 6.4504e-04, 5.0903e-04, + 9.6475e-04, 1.0499e-03, 3.7215e-05, -5.3651e-04, -3.6578e-04, -2.5678e-04 + ] + ) + # fmt: on + + self.assertTrue(output_values.shape == (2, 1, 4480)) + self.assertTrue(torch.allclose(output_values[0, 0, :16].cpu(), EXPECTED_VALUES, atol=1e-4)) + + @slow + def 
test_generate_text_prompt_sampling(self): + model = self.model + processor = self.processor + + inputs = processor(text=["80s music", "Club techno"], padding=True, return_tensors="pt") + + # prepare the encoder inputs + input_ids = inputs.input_ids.to(torch_device) + attention_mask = inputs.attention_mask.to(torch_device) + + set_seed(0) + output_values = model.generate( + input_ids, + attention_mask=attention_mask, + do_sample=True, + guidance_scale=None, + max_new_tokens=10, + temperature=1.0, + top_k=250, + ) + + # fmt: off + EXPECTED_VALUES = torch.tensor( + [ + -0.0165, -0.0222, -0.0041, -0.0058, -0.0145, -0.0023, -0.0160, -0.0310, + -0.0055, -0.0127, 0.0104, 0.0105, -0.0326, -0.0611, -0.0744, -0.0083 + ] + ) + # fmt: on + + self.assertTrue(output_values.shape == (2, 1, 4480)) + self.assertTrue(torch.allclose(output_values[0, 0, :16].cpu(), EXPECTED_VALUES, atol=1e-4)) + + @slow + def test_generate_text_audio_prompt(self): + model = self.model + processor = self.processor + + audio = [get_bip_bip(duration=0.5), get_bip_bip(duration=1.0)] + text = ["80s music", "Club techno"] + + inputs = processor(audio=audio, text=text, padding=True, return_tensors="pt").to(torch_device) + + output_values = model.generate(**inputs, do_sample=False, guidance_scale=None, max_new_tokens=10) + + # fmt: off + EXPECTED_VALUES = torch.tensor( + [ + -1.1999e-04, -2.2303e-04, 4.6296e-04, 1.0524e-03, 2.4827e-04, + -4.0294e-05, -1.2468e-04, 4.9846e-05, 7.1484e-04, 4.4198e-04, + 7.9063e-04, 8.8141e-04, -6.1807e-05, -6.1856e-04, -3.6235e-04, -2.7226e-04 + ] + ) + # fmt: on + + self.assertTrue(output_values.shape == (2, 1, 4480)) + self.assertTrue(torch.allclose(output_values[0, 0, :16].cpu(), EXPECTED_VALUES, atol=1e-4)) + + +@require_torch +@require_torchaudio +class MusicgenMelodyStereoIntegrationTests(unittest.TestCase): + @cached_property + def model(self): + return MusicgenMelodyForConditionalGeneration.from_pretrained("ylacombe/musicgen-stereo-melody").to( + torch_device + ) + + @cached_property + def processor(self): + return MusicgenMelodyProcessor.from_pretrained("ylacombe/musicgen-stereo-melody") + + @slow + def test_generate_unconditional_greedy(self): + model = self.model + + # only generate 1 sample with greedy - since it's deterministic all elements of the batch will be the same + unconditional_inputs = self.processor.get_unconditional_inputs(num_samples=1).to(torch_device) + + output_values = model.generate(**unconditional_inputs, do_sample=False, max_new_tokens=12, guidance_scale=1.0) + + # fmt: off + EXPECTED_VALUES_LEFT = torch.tensor( + [ + 1.2742e-04, -8.0480e-05, 5.5788e-04, 1.0401e-03, 2.6547e-04, + 1.5587e-05, -1.4211e-04, -9.7308e-05, 6.4503e-04, 5.0903e-04, + 9.6475e-04, 1.0499e-03, 3.7205e-05, -5.3652e-04, -3.6579e-04, 2.5679e-04 + ] + ) + # fmt: on + + # (bsz, channels, seq_len) + self.assertTrue(output_values.shape == (1, 2, 5760)) + self.assertTrue(torch.allclose(output_values[0, 0, :16].cpu(), EXPECTED_VALUES_LEFT, atol=6e-4)) + self.assertTrue(torch.allclose(output_values[0, 1, :16].cpu(), EXPECTED_VALUES_LEFT, atol=6e-4)) + + @slow + def test_generate_text_audio_prompt(self): + model = self.model + processor = self.processor + + audio = [get_bip_bip(duration=0.5), get_bip_bip(duration=1.0)] + text = ["80s music", "Club techno"] + + inputs = processor(audio=audio, text=text, padding=True, return_tensors="pt").to(torch_device) + + output_values = model.generate(**inputs, do_sample=False, guidance_scale=3.0, max_new_tokens=12) + + # fmt: off + EXPECTED_VALUES_LEFT_FIRST_SAMPLE = 
torch.tensor( + [ + -0.0862, -0.1021, -0.0936, -0.0754, -0.0616, -0.0456, -0.0354, -0.0298, + -0.0036, 0.0222, 0.0523, 0.0660, 0.0496, 0.0356, 0.0457, 0.0769 + ] + ) + EXPECTED_VALUES_RIGHT_SECOND_SAMPLE = torch.tensor( + [ + -0.0327, -0.0450, -0.0264, -0.0278, -0.0365, -0.0272, -0.0401, -0.0574, + -0.0413, -0.0508, -0.0269, -0.0323, -0.0762, -0.1115, -0.1390, -0.0790 + ] + ) + # fmt: on + + # (bsz, channels, seq_len) + self.assertTrue(output_values.shape == (2, 2, 5760)) + self.assertTrue(torch.allclose(output_values[0, 0, :16].cpu(), EXPECTED_VALUES_LEFT_FIRST_SAMPLE, atol=1e-4)) + self.assertTrue(torch.allclose(output_values[1, 1, :16].cpu(), EXPECTED_VALUES_RIGHT_SECOND_SAMPLE, atol=1e-4)) diff --git a/tests/models/musicgen_melody/test_processor_musicgen_melody.py b/tests/models/musicgen_melody/test_processor_musicgen_melody.py new file mode 100644 index 00000000000000..e00f31c495990f --- /dev/null +++ b/tests/models/musicgen_melody/test_processor_musicgen_melody.py @@ -0,0 +1,179 @@ +# Copyright 2024 The HuggingFace Team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +"""Tests for the MusicGen processor.""" + +import random +import shutil +import tempfile +import unittest + +import numpy as np + +from transformers import T5Tokenizer, T5TokenizerFast +from transformers.testing_utils import require_sentencepiece, require_torch, require_torchaudio +from transformers.utils.import_utils import is_torchaudio_available + + +if is_torchaudio_available(): + from transformers import MusicgenMelodyFeatureExtractor, MusicgenMelodyProcessor + + +global_rng = random.Random() + + +# Copied from tests.models.whisper.test_feature_extraction_whisper.floats_list +def floats_list(shape, scale=1.0, rng=None, name=None): + """Creates a random float32 tensor""" + if rng is None: + rng = global_rng + + values = [] + for batch_idx in range(shape[0]): + values.append([]) + for _ in range(shape[1]): + values[-1].append(rng.random() * scale) + + return values + + +@require_torch +@require_sentencepiece +@require_torchaudio +# Copied from tests.models.musicgen.test_processing_musicgen.MusicgenProcessorTest with Musicgen->MusicgenMelody, Encodec->MusicgenMelody, padding_mask->attention_mask, input_values->input_features +class MusicgenMelodyProcessorTest(unittest.TestCase): + def setUp(self): + # Ignore copy + self.checkpoint = "facebook/musicgen-melody" + self.tmpdirname = tempfile.mkdtemp() + + def get_tokenizer(self, **kwargs): + return T5Tokenizer.from_pretrained(self.checkpoint, **kwargs) + + def get_feature_extractor(self, **kwargs): + return MusicgenMelodyFeatureExtractor.from_pretrained(self.checkpoint, **kwargs) + + def tearDown(self): + shutil.rmtree(self.tmpdirname) + + def test_save_load_pretrained_default(self): + tokenizer = self.get_tokenizer() + feature_extractor = self.get_feature_extractor() + + processor = MusicgenMelodyProcessor(tokenizer=tokenizer, feature_extractor=feature_extractor) + + processor.save_pretrained(self.tmpdirname) + processor = 
MusicgenMelodyProcessor.from_pretrained(self.tmpdirname) + + self.assertEqual(processor.tokenizer.get_vocab(), tokenizer.get_vocab()) + self.assertIsInstance(processor.tokenizer, T5TokenizerFast) + + self.assertEqual(processor.feature_extractor.to_json_string(), feature_extractor.to_json_string()) + self.assertIsInstance(processor.feature_extractor, MusicgenMelodyFeatureExtractor) + + def test_save_load_pretrained_additional_features(self): + processor = MusicgenMelodyProcessor( + tokenizer=self.get_tokenizer(), feature_extractor=self.get_feature_extractor() + ) + processor.save_pretrained(self.tmpdirname) + + tokenizer_add_kwargs = self.get_tokenizer(bos_token="(BOS)", eos_token="(EOS)") + feature_extractor_add_kwargs = self.get_feature_extractor(do_normalize=False, padding_value=1.0) + + processor = MusicgenMelodyProcessor.from_pretrained( + self.tmpdirname, bos_token="(BOS)", eos_token="(EOS)", do_normalize=False, padding_value=1.0 + ) + + self.assertEqual(processor.tokenizer.get_vocab(), tokenizer_add_kwargs.get_vocab()) + self.assertIsInstance(processor.tokenizer, T5TokenizerFast) + + self.assertEqual(processor.feature_extractor.to_json_string(), feature_extractor_add_kwargs.to_json_string()) + self.assertIsInstance(processor.feature_extractor, MusicgenMelodyFeatureExtractor) + + def test_feature_extractor(self): + feature_extractor = self.get_feature_extractor() + tokenizer = self.get_tokenizer() + + processor = MusicgenMelodyProcessor(tokenizer=tokenizer, feature_extractor=feature_extractor) + + raw_speech = floats_list((3, 1000)) + + input_feat_extract = feature_extractor(raw_speech, return_tensors="np") + input_processor = processor(raw_speech, return_tensors="np") + + for key in input_feat_extract.keys(): + self.assertAlmostEqual(input_feat_extract[key].sum(), input_processor[key].sum(), delta=1e-2) + + def test_tokenizer(self): + feature_extractor = self.get_feature_extractor() + tokenizer = self.get_tokenizer() + + processor = MusicgenMelodyProcessor(tokenizer=tokenizer, feature_extractor=feature_extractor) + + input_str = "This is a test string" + + encoded_processor = processor(text=input_str) + + encoded_tok = tokenizer(input_str) + + for key in encoded_tok.keys(): + self.assertListEqual(encoded_tok[key], encoded_processor[key]) + + def test_tokenizer_decode(self): + feature_extractor = self.get_feature_extractor() + tokenizer = self.get_tokenizer() + + processor = MusicgenMelodyProcessor(tokenizer=tokenizer, feature_extractor=feature_extractor) + + predicted_ids = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]] + + decoded_processor = processor.batch_decode(sequences=predicted_ids) + decoded_tok = tokenizer.batch_decode(predicted_ids) + + self.assertListEqual(decoded_tok, decoded_processor) + + def test_model_input_names(self): + feature_extractor = self.get_feature_extractor() + tokenizer = self.get_tokenizer() + + processor = MusicgenMelodyProcessor(tokenizer=tokenizer, feature_extractor=feature_extractor) + + self.assertListEqual( + processor.model_input_names, + feature_extractor.model_input_names, + msg="`processor` and `feature_extractor` model input names do not match", + ) + + # Ignore copy + def test_decode_audio(self): + feature_extractor = self.get_feature_extractor(padding_side="left") + tokenizer = self.get_tokenizer() + + processor = MusicgenMelodyProcessor(tokenizer=tokenizer, feature_extractor=feature_extractor) + + attention_mask = np.zeros((3, 20)) + attention_mask[0, -5:] = 1 + attention_mask[1, -20:] = 1 + attention_mask[2, -10:] = 1 + + generated_speech 
= np.asarray(floats_list((3, 20)))[:, None, :] + decoded_audios = processor.batch_decode(generated_speech, attention_mask=attention_mask) + + self.assertIsInstance(decoded_audios, list) + + for audio in decoded_audios: + self.assertIsInstance(audio, np.ndarray) + + self.assertTrue(decoded_audios[0].shape == (1, 5)) + self.assertTrue(decoded_audios[1].shape == (1, 20)) + self.assertTrue(decoded_audios[2].shape == (1, 10)) diff --git a/tests/utils/test_audio_utils.py b/tests/utils/test_audio_utils.py index 12d00929a969bd..d647181307656c 100644 --- a/tests/utils/test_audio_utils.py +++ b/tests/utils/test_audio_utils.py @@ -20,6 +20,7 @@ from transformers.audio_utils import ( amplitude_to_db, + chroma_filter_bank, hertz_to_mel, mel_filter_bank, mel_to_hertz, @@ -27,6 +28,11 @@ spectrogram, window_function, ) +from transformers.testing_utils import is_librosa_available, require_librosa + + +if is_librosa_available(): + from librosa.filters import chroma class AudioUtilsFunctionTester(unittest.TestCase): @@ -755,3 +761,57 @@ def test_amplitude_to_db(self): amplitude_to_db(spectrogram, min_value=0.0) with pytest.raises(ValueError): amplitude_to_db(spectrogram, db_range=-80) + + @require_librosa + def test_chroma_equivalence(self): + num_frequency_bins = 25 + num_chroma = 6 + sampling_rate = 24000 + + # test default parameters + original_chroma = chroma(sr=sampling_rate, n_chroma=num_chroma, n_fft=num_frequency_bins) + utils_chroma = chroma_filter_bank( + num_frequency_bins=num_frequency_bins, num_chroma=num_chroma, sampling_rate=sampling_rate + ) + + self.assertTrue(np.allclose(original_chroma, utils_chroma)) + + # test no weighting_parameters + original_chroma = chroma(sr=sampling_rate, n_chroma=num_chroma, n_fft=num_frequency_bins, octwidth=None) + utils_chroma = chroma_filter_bank( + num_frequency_bins=num_frequency_bins, + num_chroma=num_chroma, + sampling_rate=sampling_rate, + weighting_parameters=None, + ) + + self.assertTrue(np.allclose(original_chroma, utils_chroma)) + + # test with L1 norm + original_chroma = chroma(sr=sampling_rate, n_chroma=num_chroma, n_fft=num_frequency_bins, norm=1.0) + utils_chroma = chroma_filter_bank( + num_frequency_bins=num_frequency_bins, num_chroma=num_chroma, sampling_rate=sampling_rate, power=1.0 + ) + + self.assertTrue(np.allclose(original_chroma, utils_chroma)) + + # test starting at 'A' chroma, power = None, tuning = 0, different weighting_parameters + original_chroma = chroma( + sr=sampling_rate, + n_chroma=num_chroma, + n_fft=num_frequency_bins, + norm=None, + base_c=None, + octwidth=1.0, + ctroct=4.0, + ) + utils_chroma = chroma_filter_bank( + num_frequency_bins=num_frequency_bins, + num_chroma=num_chroma, + sampling_rate=sampling_rate, + power=None, + start_at_c_chroma=False, + weighting_parameters=(4.0, 1.0), + ) + + self.assertTrue(np.allclose(original_chroma, utils_chroma)) diff --git a/utils/check_docstrings.py b/utils/check_docstrings.py index 303579853f5ba0..a58d08eccaf305 100644 --- a/utils/check_docstrings.py +++ b/utils/check_docstrings.py @@ -395,6 +395,7 @@ "MraConfig", "MusicgenDecoderConfig", "MusicgenForConditionalGeneration", + "MusicgenMelodyForConditionalGeneration", "MvpConfig", "MvpTokenizerFast", "MT5Tokenizer", diff --git a/utils/check_repo.py b/utils/check_repo.py index 4e7705d958b77b..f577bf1507424e 100644 --- a/utils/check_repo.py +++ b/utils/check_repo.py @@ -294,6 +294,7 @@ "BarkCoarseModel", "BarkFineModel", "BarkSemanticModel", + "MusicgenMelodyModel", "MusicgenModel", "MusicgenForConditionalGeneration", 
"SpeechT5ForSpeechToSpeech", diff --git a/utils/not_doctested.txt b/utils/not_doctested.txt index 3e4c78cd9c4e74..6deb9b8072e780 100644 --- a/utils/not_doctested.txt +++ b/utils/not_doctested.txt @@ -176,6 +176,7 @@ docs/source/en/model_doc/mpt.md docs/source/en/model_doc/mra.md docs/source/en/model_doc/mt5.md docs/source/en/model_doc/musicgen.md +docs/source/en/model_doc/musicgen_melody.md docs/source/en/model_doc/mvp.md docs/source/en/model_doc/nat.md docs/source/en/model_doc/nezha.md @@ -706,6 +707,7 @@ src/transformers/models/mt5/modeling_flax_mt5.py src/transformers/models/mt5/modeling_mt5.py src/transformers/models/mt5/modeling_tf_mt5.py src/transformers/models/musicgen/convert_musicgen_transformers.py +src/transformers/models/musicgen_melody/convert_musicgen_melody_transformers.py src/transformers/models/mvp/modeling_mvp.py src/transformers/models/nezha/modeling_nezha.py src/transformers/models/nllb_moe/configuration_nllb_moe.py diff --git a/utils/slow_documentation_tests.txt b/utils/slow_documentation_tests.txt index 302778c7320039..e36eae6e2d514a 100644 --- a/utils/slow_documentation_tests.txt +++ b/utils/slow_documentation_tests.txt @@ -8,4 +8,6 @@ docs/source/en/tasks/prompting.md src/transformers/models/blip_2/modeling_blip_2.py src/transformers/models/ctrl/modeling_ctrl.py src/transformers/models/fuyu/modeling_fuyu.py -src/transformers/models/kosmos2/modeling_kosmos2.py \ No newline at end of file +src/transformers/models/kosmos2/modeling_kosmos2.py +src/transformers/models/musicgen_melody/modeling_musicgen_melody.py +src/transformers/models/musicgen_melody/processing_musicgen_melody.py \ No newline at end of file