
Commit ff13eb6

Lasr model (#42648)
* nit on dac!
* first commit
* fixes
* passing all API tests
* nit
* update
* test update
* make style
* make
* fixes
* fix
* test update
* return attention mask by default
* doc update
* make
* make
* fix
1 parent e3673ed

23 files changed: +2670 additions, −12 deletions

docs/source/en/_toctree.yml

Lines changed: 2 additions & 0 deletions
```diff
@@ -901,6 +901,8 @@
       title: Hubert
     - local: model_doc/kyutai_speech_to_text
       title: Kyutai Speech-To-Text
+    - local: model_doc/lasr
+      title: LASR
     - local: model_doc/mimi
       title: Mimi
     - local: model_doc/mms
```

docs/source/en/model_doc/lasr.md

Lines changed: 108 additions & 0 deletions
<!--Copyright 2025 The HuggingFace Inc. team. All rights reserved.

Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
the License. You may obtain a copy of the License at

http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
specific language governing permissions and limitations under the License.

⚠️ Note that this file is in Markdown but contains specific syntax for our doc-builder (similar to MDX) that may not be
rendered properly in your Markdown viewer.

-->

<div class="flex flex-wrap space-x-1">
<img alt="PyTorch" src="https://img.shields.io/badge/PyTorch-DE3412?style=flat&logo=pytorch&logoColor=white">
<img alt="SDPA" src="https://img.shields.io/badge/SDPA-DE3412?style=flat&logo=pytorch&logoColor=white">
</div>

# LASR

## Overview

TODO

## Usage

### Basic usage

<hfoptions id="usage">
<hfoption id="Pipeline">

```py
from transformers import pipeline

pipe = pipeline("automatic-speech-recognition", model="path/to/lasr-model")
out = pipe("path/to/audio.mp3")
print(out)
```

</hfoption>
<hfoption id="AutoModel">

```py
from transformers import AutoModelForCTC, AutoProcessor
from datasets import load_dataset, Audio
import torch

device = "cuda" if torch.cuda.is_available() else "cpu"

processor = AutoProcessor.from_pretrained("path/to/lasr-model")
model = AutoModelForCTC.from_pretrained("path/to/lasr-model", dtype="auto", device_map=device)

# Resample the audio to the rate the feature extractor expects
ds = load_dataset("hf-internal-testing/librispeech_asr_dummy", "clean", split="validation")
ds = ds.cast_column("audio", Audio(sampling_rate=processor.feature_extractor.sampling_rate))
speech_samples = [el["array"] for el in ds["audio"][:5]]

inputs = processor(speech_samples, sampling_rate=processor.feature_extractor.sampling_rate)
inputs.to(model.device, dtype=model.dtype)  # moves and casts the batch tensors in place
outputs = model.generate(**inputs)
print(processor.batch_decode(outputs))
```

</hfoption>
</hfoptions>
### Making The Model Go Brrr

TODO

### Training

TODO

## LasrTokenizer

[[autodoc]] LasrTokenizer

## LasrFeatureExtractor

[[autodoc]] LasrFeatureExtractor
    - __call__

## LasrProcessor

[[autodoc]] LasrProcessor
    - __call__
    - batch_decode
    - decode

## LasrEncoderConfig

[[autodoc]] LasrEncoderConfig

## LasrCTCConfig

[[autodoc]] LasrCTCConfig

## LasrEncoder

[[autodoc]] LasrEncoder

## LasrForCTC

[[autodoc]] LasrForCTC

src/transformers/models/__init__.py

Lines changed: 1 addition & 0 deletions
```diff
@@ -186,6 +186,7 @@
 from .jetmoe import *
 from .kosmos2 import *
 from .kyutai_speech_to_text import *
+from .lasr import *
 from .layoutlm import *
 from .layoutlmv2 import *
 from .layoutlmv3 import *
```

src/transformers/models/auto/configuration_auto.py

Lines changed: 7 additions & 0 deletions
```diff
@@ -221,6 +221,8 @@
         ("kosmos-2", "Kosmos2Config"),
         ("kosmos-2.5", "Kosmos2_5Config"),
         ("kyutai_speech_to_text", "KyutaiSpeechToTextConfig"),
+        ("lasr_ctc", "LasrCTCConfig"),
+        ("lasr_encoder", "LasrEncoderConfig"),
         ("layoutlm", "LayoutLMConfig"),
         ("layoutlmv2", "LayoutLMv2Config"),
         ("layoutlmv3", "LayoutLMv3Config"),
@@ -662,6 +664,9 @@
         ("kosmos-2", "KOSMOS-2"),
         ("kosmos-2.5", "KOSMOS-2.5"),
         ("kyutai_speech_to_text", "KyutaiSpeechToText"),
+        ("lasr", "Lasr"),
+        ("lasr_ctc", "Lasr"),
+        ("lasr_encoder", "LasrEncoder"),
         ("layoutlm", "LayoutLM"),
         ("layoutlmv2", "LayoutLMv2"),
         ("layoutlmv3", "LayoutLMv3"),
@@ -977,6 +982,8 @@
         ("video_llama_3_vision", "video_llama_3"),
         ("parakeet_encoder", "parakeet"),
         ("parakeet_ctc", "parakeet"),
+        ("lasr_encoder", "lasr"),
+        ("lasr_ctc", "lasr"),
         ("wav2vec2-bert", "wav2vec2_bert"),
     ]
 )
```
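With these mappings in place, the `lasr_ctc` and `lasr_encoder` model types resolve through the auto API. A minimal sketch, not part of this commit, assuming the config classes are exported as usual:

```py
# Minimal sketch: AutoConfig dispatches on the registered model_type keys.
from transformers import AutoConfig

encoder_config = AutoConfig.for_model("lasr_encoder")  # -> LasrEncoderConfig
ctc_config = AutoConfig.for_model("lasr_ctc")          # -> LasrCTCConfig
print(type(encoder_config).__name__, type(ctc_config).__name__)
```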

src/transformers/models/auto/feature_extraction_auto.py

Lines changed: 2 additions & 0 deletions
```diff
@@ -49,6 +49,8 @@
         ("granite_speech", "GraniteSpeechFeatureExtractor"),
         ("hubert", "Wav2Vec2FeatureExtractor"),
         ("kyutai_speech_to_text", "KyutaiSpeechToTextFeatureExtractor"),
+        ("lasr_ctc", "LasrFeatureExtractor"),
+        ("lasr_encoder", "LasrFeatureExtractor"),
         ("markuplm", "MarkupLMFeatureExtractor"),
         ("mimi", "EncodecFeatureExtractor"),
         ("moonshine", "Wav2Vec2FeatureExtractor"),
```

src/transformers/models/auto/modeling_auto.py

Lines changed: 3 additions & 0 deletions
```diff
@@ -222,6 +222,8 @@ class _BaseModelWithGenerate(PreTrainedModel, GenerationMixin):
         ("kosmos-2", "Kosmos2Model"),
         ("kosmos-2.5", "Kosmos2_5Model"),
         ("kyutai_speech_to_text", "KyutaiSpeechToTextModel"),
+        ("lasr_ctc", "LasrForCTC"),
+        ("lasr_encoder", "LasrEncoder"),
         ("layoutlm", "LayoutLMModel"),
         ("layoutlmv2", "LayoutLMv2Model"),
         ("layoutlmv3", "LayoutLMv3Model"),
@@ -1583,6 +1585,7 @@ class _BaseModelWithGenerate(PreTrainedModel, GenerationMixin):
         # Model for Connectionist temporal classification (CTC) mapping
         ("data2vec-audio", "Data2VecAudioForCTC"),
         ("hubert", "HubertForCTC"),
+        ("lasr_ctc", "LasrForCTC"),
         ("parakeet_ctc", "ParakeetForCTC"),
         ("sew", "SEWForCTC"),
         ("sew-d", "SEWDForCTC"),
```

src/transformers/models/dac/modeling_dac.py

Lines changed: 2 additions & 2 deletions
```diff
@@ -264,7 +264,7 @@ def forward(self, hidden_state):
         return hidden_state


-class DacResidualVectorQuantize(nn.Module):
+class DacResidualVectorQuantizer(nn.Module):
     """
     ResidualVectorQuantize block - Introduced in SoundStream: An end2end neural audio codec (https://huggingface.co/papers/2107.03312)
     """
@@ -568,7 +568,7 @@ def __init__(self, config: DacConfig):
         self.encoder = DacEncoder(config)
         self.decoder = DacDecoder(config)

-        self.quantizer = DacResidualVectorQuantize(config)
+        self.quantizer = DacResidualVectorQuantizer(config)

         self.bits_per_codebook = int(math.log2(self.config.codebook_size))
         if 2**self.bits_per_codebook != self.config.codebook_size:
```

src/transformers/models/fastspeech2_conformer/modeling_fastspeech2_conformer.py

Lines changed: 5 additions & 2 deletions
```diff
@@ -514,7 +514,7 @@ def forward(self, hidden_states, attention_mask=None):

         Args:
             hidden_states (`torch.Tensor` of shape `(batch, time, channels)`): Input tensor.
-            attention_mask (`torch.Tensor` of shape `(batch, 1, time)`): Attention mask.
+            attention_mask (`torch.Tensor` of shape `(batch, 1, time, time)`): Attention mask.

         Returns:
             `torch.Tensor`: Output tensor of shape `(batch, time, channels)`.
@@ -530,7 +530,10 @@ def forward(self, hidden_states, attention_mask=None):

         # Apply padding mask before convolution
         if attention_mask is not None:
-            all_masked_rows = torch.all(~attention_mask, dim=-1)
+            if attention_mask.dtype == torch.bool:
+                all_masked_rows = torch.all(~attention_mask, dim=2)
+            else:
+                all_masked_rows = torch.all(~(attention_mask == 0.0), dim=2)
             hidden_states = hidden_states.masked_fill(all_masked_rows, 0.0)

             # 1D Depthwise Conv
```
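The fix distinguishes boolean masks (`True` marks valid positions) from additive float masks (`0.0` marks valid positions, a large negative value marks masked ones); in both cases a position that is masked in every row of the 4D mask should be zeroed before the convolution. A standalone sketch of the equivalence, not from this commit:

```py
# Standalone sketch: the bool branch and the float branch above agree on
# which positions are fully masked in a (batch, 1, time, time) mask.
import torch

bool_mask = torch.tensor([[[[True, True, False],
                            [True, True, False],
                            [False, False, False]]]])  # True = valid position
float_mask = torch.zeros_like(bool_mask, dtype=torch.float)
float_mask[~bool_mask] = torch.finfo(torch.float).min   # additive-mask convention

all_masked_bool = torch.all(~bool_mask, dim=2)           # bool branch
all_masked_float = torch.all(~(float_mask == 0.0), dim=2)  # float branch
print(torch.equal(all_masked_bool, all_masked_float))    # True
```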
src/transformers/models/lasr/__init__.py

Lines changed: 29 additions & 0 deletions
```py
# Copyright 2025 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING

from ...utils import _LazyModule
from ...utils.import_utils import define_import_structure


if TYPE_CHECKING:
    from .configuration_lasr import *
    from .feature_extraction_lasr import *
    from .modeling_lasr import *
    from .tokenization_lasr import *
else:
    import sys

    _file = globals()["__file__"]
    sys.modules[__name__] = _LazyModule(__name__, _file, define_import_structure(_file), module_spec=__spec__)
```
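This is the standard transformers lazy-module shim: at type-checking time the star imports make the LASR symbols visible to static analysis, while at runtime the module is replaced by a `_LazyModule` that defers the actual imports until first use. A small sketch of the observable behavior, assuming `modeling_lasr` exports `LasrForCTC`:

```py
# Sketch: attribute access on the lazy module triggers the real import.
from transformers.models import lasr

model_cls = lasr.LasrForCTC     # imports modeling_lasr on first access
print(model_cls.__module__)     # transformers.models.lasr.modeling_lasr
```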
