tests : add script to benchmark whisper.cpp on LibriSpeech corpus #2999
tests/librispeech/.gitignore (new file)
@@ -0,0 +1,6 @@
__pycache__
*.tar.gz
*.txt
eval.conf
venv
LibriSpeech
tests/librispeech/Makefile (new file)
@@ -0,0 +1,15 @@
TAR_URL = https://www.openslr.org/resources/12/test-clean.tar.gz

all: eval

eval:
	$(MAKE) -f eval.mk

clean:
	$(MAKE) -f eval.mk clean

get-audio:
	wget -c $(TAR_URL)
	tar -xf test-clean.tar.gz

.PHONY: all eval clean setup-venv clean-venv get-audio
tests/librispeech/README.md (new file)
@@ -0,0 +1,60 @@
# whisper.cpp/tests/librispeech

[LibriSpeech](https://www.openslr.org/12) is a standard dataset for
training and evaluating automatic speech recognition systems.

This directory contains a set of tools to evaluate the recognition
performance of whisper.cpp on the LibriSpeech corpus.

## Quick Start

1. (Prerequisite) Compile `whisper-cli` and prepare the Whisper
   model in `ggml` format.

   ```
   $ # Execute the commands below in the project root dir.
   $ cmake -B build
   $ cmake --build build --config Release
   $ ./models/download-ggml-model.sh tiny
   ```

   Consult [whisper.cpp/README.md](../../README.md) for more details.

2. Download the audio files from the LibriSpeech project.

   ```
   $ make get-audio
   ```

3. Set up the environment to compute the WER score.

   ```
   $ pip install -r requirements.txt
   ```

   For example, if you use `virtualenv`, you can set it up as follows:

   ```
   $ python3 -m venv venv
   $ . venv/bin/activate
   $ pip install -r requirements.txt
   ```

4. Run the benchmark test.

   ```
   $ make
   ```

## How-to guides

### How to change the inference parameters

Create `eval.conf` and override variables.

```
WHISPER_MODEL = large-v3-turbo
WHISPER_FLAGS = --no-prints --threads 8 --language en --output-txt
```

Check out `eval.mk` for more details.

Inline review comment on this file: Confirmed to work on Ubuntu 24.04 and Amazon Linux 2023.
tests/librispeech/eval.mk (new file)
@@ -0,0 +1,39 @@
PYTHON = python

WHISPER_PREFIX = ../../
WHISPER_MODEL = tiny

WHISPER_CLI = $(WHISPER_PREFIX)build/bin/whisper-cli
WHISPER_FLAGS = --no-prints --language en --output-txt

# You can create eval.conf to override the WHISPER_* variables
# defined above.
-include eval.conf

# This follows the file structure of the LibriSpeech project.
AUDIO_SRCS = $(sort $(wildcard LibriSpeech/*/*/*/*.flac))
TRANS_TXTS = $(addsuffix .txt, $(AUDIO_SRCS))

# We output the evaluation result to this file.
DONE = $(WHISPER_MODEL).txt

all: $(DONE)

$(DONE): $(TRANS_TXTS)
	$(PYTHON) eval.py > $@.tmp
	mv $@.tmp $@

# Note: This task writes to a temporary file first to
# create the target file atomically.
%.flac.txt: %.flac
	$(WHISPER_CLI) $(WHISPER_FLAGS) --model $(WHISPER_PREFIX)models/ggml-$(WHISPER_MODEL).bin --file $^ --output-file $^.tmp
	mv $^.tmp.txt $^.txt

archive:
	tar -czf $(WHISPER_MODEL).tar.gz --exclude="*.flac" LibriSpeech $(DONE)

clean:
	@rm -f $(TRANS_TXTS)
	@rm -f $(DONE)

.PHONY: all clean
tests/librispeech/eval.py (new file)
@@ -0,0 +1,47 @@
import os
import glob
import jiwer
from normalizers import EnglishTextNormalizer

def get_reference():
    ref = {}
    for path in glob.glob('LibriSpeech/*/*/*/*.trans.txt'):
        with open(path) as fp:
            for line in fp:
                code, text = line.strip().split(" ", maxsplit=1)
                ref[code] = text
    return ref

def get_hypothesis():
    hyp = {}
    for path in glob.glob('LibriSpeech/*/*/*/*.flac.txt'):
        with open(path) as fp:
            text = fp.read().strip()
        code = os.path.basename(path).replace('.flac.txt', '')
        hyp[code] = text
    return hyp

def get_codes():
    codes = []
    for path in glob.glob('LibriSpeech/*/*/*/*.flac'):
        codes.append(os.path.basename(path).replace('.flac', ''))
    return sorted(codes)

def main():
    normalizer = EnglishTextNormalizer()

    ref_orig = get_reference()
    hyp_orig = get_hypothesis()

    ref_clean = []
    hyp_clean = []

    for code in get_codes():
        ref_clean.append(normalizer(ref_orig[code]))
        hyp_clean.append(normalizer(hyp_orig[code]))

    wer = jiwer.wer(ref_clean, hyp_clean)
    print(f"WER: {wer * 100:.2f}%")

if __name__ == '__main__':
    main()
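For reference, the scoring step that `eval.py` performs can be exercised in isolation. The snippet below is a minimal sketch with hypothetical strings; it assumes `jiwer` is installed (see `requirements.txt`) and that it is run from this directory so the bundled `normalizers` package resolves:

```
from jiwer import wer
from normalizers import EnglishTextNormalizer

normalizer = EnglishTextNormalizer()

# Hypothetical reference/hypothesis pair; eval.py instead builds these lists
# from the LibriSpeech *.trans.txt files and the whisper-cli *.flac.txt outputs.
ref = normalizer("HE HOPED THERE WOULD BE STEW FOR DINNER")
hyp = normalizer("he hoped there would be a stew for dinner")

# jiwer.wer accepts either single strings or lists of strings.
print(f"WER: {wer([ref], [hyp]) * 100:.2f}%")
```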
@@ -0,0 +1,25 @@
Code in this directory is adapted from the OpenAI Whisper project
(https://github.com/openai/whisper) and carries the following
copyright and license.

MIT License

Copyright (c) 2022 OpenAI

Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:

The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.

THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
tests/librispeech/normalizers/__init__.py (new file)
@@ -0,0 +1,2 @@ | ||
from .basic import BasicTextNormalizer as BasicTextNormalizer | ||
from .english import EnglishTextNormalizer as EnglishTextNormalizer |
tests/librispeech/normalizers/basic.py (new file)
@@ -0,0 +1,80 @@
import re
import unicodedata

import regex

# non-ASCII letters that are not separated by "NFKD" normalization
ADDITIONAL_DIACRITICS = {
    "œ": "oe",
    "Œ": "OE",
    "ø": "o",
    "Ø": "O",
    "æ": "ae",
    "Æ": "AE",
    "ß": "ss",
    "ẞ": "SS",
    "đ": "d",
    "Đ": "D",
    "ð": "d",
    "Ð": "D",
    "þ": "th",
    "Þ": "th",
    "ł": "l",
    "Ł": "L",
}


def remove_symbols_and_diacritics(s: str, keep=""):
    """
    Replace any other markers, symbols, and punctuations with a space,
    and drop any diacritics (category 'Mn' and some manual mappings)
    """
    return "".join(
        (
            c
            if c in keep
            else (
                ADDITIONAL_DIACRITICS[c]
                if c in ADDITIONAL_DIACRITICS
                else (
                    ""
                    if unicodedata.category(c) == "Mn"
                    else " " if unicodedata.category(c)[0] in "MSP" else c
                )
            )
        )
        for c in unicodedata.normalize("NFKD", s)
    )


def remove_symbols(s: str):
    """
    Replace any other markers, symbols, punctuations with a space, keeping diacritics
    """
    return "".join(
        " " if unicodedata.category(c)[0] in "MSP" else c
        for c in unicodedata.normalize("NFKC", s)
    )


class BasicTextNormalizer:
    def __init__(self, remove_diacritics: bool = False, split_letters: bool = False):
        self.clean = (
            remove_symbols_and_diacritics if remove_diacritics else remove_symbols
        )
        self.split_letters = split_letters

    def __call__(self, s: str):
        s = s.lower()
        s = re.sub(r"[<\[][^>\]]*[>\]]", "", s)  # remove words between brackets
        s = re.sub(r"\(([^)]+?)\)", "", s)  # remove words between parenthesis
        s = self.clean(s).lower()

        if self.split_letters:
            s = " ".join(regex.findall(r"\X", s, regex.U))

        s = re.sub(
            r"\s+", " ", s
        )  # replace any successive whitespace characters with a space

        return s
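As a usage sketch (hypothetical input string; run from the `tests/librispeech` directory so the package import resolves), the basic normalizer above behaves like this:

```
from normalizers import BasicTextNormalizer

# remove_diacritics=True selects remove_symbols_and_diacritics, so "ß" maps to
# "ss" via ADDITIONAL_DIACRITICS and "é" loses its accent after NFKD folding,
# while the bracketed tag is stripped by the regex in __call__.
normalize = BasicTextNormalizer(remove_diacritics=True)

print(normalize("Straße [noise] Café"))  # -> "strasse cafe"
```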