mirror of
https://github.com/ggerganov/whisper.cpp.git
synced 2025-04-07 19:58:50 +02:00
This commit updates the recommended version of Python to 3.11 for Core ML conversion support. It also adds the `-e` flag to the `generate-coreml-model.sh` script to ensure that the script exits on the first error. The motivation for this is that when following the installation instructions using Python 3.10 I get the following error: ```console (venv) $ ./models/generate-coreml-model.sh base.en A module that was compiled using NumPy 1.x cannot be run in NumPy 2.1.3 as it may crash. To support both 1.x and 2.x versions of NumPy, modules must be compiled with NumPy 2.0. Some module may need to rebuild instead e.g. with 'pybind11>=2.12'. If you are a user of the module, the easiest solution will be to downgrade to 'numpy<2' or try to upgrade the affected module. We expect that some modules will need time to support NumPy 2. Traceback (most recent call last): File "/whisper-work/models/convert-whisper-to-coreml.py", line 2, in <module> import torch File "/whisper-work/venv/lib/python3.10/site-packages/torch/__init__.py", line 870, in <module> from . import _masked File "/whisper-work/venv/lib/python3.10/site-packages/torch/_masked/__init__.py", line 420, in <module> def sum(input: Tensor, File "/whisper-work/venv/lib/python3.10/site-packages/torch/_masked/__init__.py", line 223, in _apply_docstring_templates example_input = torch.tensor([[-3, -2, -1], [0, 1, 2]]) /whisper-work/venv/lib/python3.10/site-packages/torch/_masked/__init__.py:223: UserWarning: Failed to initialize NumPy: _ARRAY_API not found (Triggered internally at /Users/distiller/project/pytorch/torch/csrc/utils/tensor_numpy.cpp:68.) example_input = torch.tensor([[-3, -2, -1], [0, 1, 2]]) Minimum required torch version for importing coremltools.optimize.torch is 2.1.0. Got torch version 1.11.0. 
Traceback (most recent call last): File "/whisper-work/models/convert-whisper-to-coreml.py", line 4, in <module> import coremltools as ct File "/whisper-work/venv/lib/python3.10/site-packages/coremltools/__init__.py", line 120, in <module> from . import converters, models, optimize, proto File "/whisper-work/venv/lib/python3.10/site-packages/coremltools/converters/__init__.py", line 7, in <module> from . import libsvm, sklearn, xgboost File "/Users/danbev/work/ai/whisper-work/venv/lib/python3.10/site-packages/coremltools/converters/xgboost/__init__.py", line 6, in <module> from ._tree import convert File "/Users/danbev/work/ai/whisper-work/venv/lib/python3.10/site-packages/coremltools/converters/xgboost/_tree.py", line 9, in <module> from ._tree_ensemble import convert_tree_ensemble as _convert_tree_ensemble File "/Users/danbev/work/ai/whisper-work/venv/lib/python3.10/site-packages/coremltools/converters/xgboost/_tree_ensemble.py", line 11, in <module> from ...models.tree_ensemble import TreeEnsembleClassifier File "/Users/danbev/work/ai/whisper-work/venv/lib/python3.10/site-packages/coremltools/models/__init__.py", line 6, in <module> from . import ( File "/Users/danbev/work/ai/whisper-work/venv/lib/python3.10/site-packages/coremltools/models/ml_program/__init__.py", line 6, in <module> from . import compression_utils File "/Users/danbev/work/ai/whisper-work/venv/lib/python3.10/site-packages/coremltools/models/ml_program/compression_utils.py", line 8, in <module> from coremltools.converters.mil.mil import Operation as _Operation File "/Users/danbev/work/ai/whisper-work/venv/lib/python3.10/site-packages/coremltools/converters/mil/__init__.py", line 7, in <module> from .frontend.tensorflow.tf_op_registry import register_tf_op File "/Users/danbev/work/ai/whisper-work/venv/lib/python3.10/site-packages/coremltools/converters/mil/frontend/__init__.py", line 6, in <module> from . 
import tensorflow, tensorflow2, torch File "/Users/danbev/work/ai/whisper-work/venv/lib/python3.10/site-packages/coremltools/converters/mil/frontend/torch/__init__.py", line 11, in <module> from . import ops, quantization_ops File "/Users/danbev/work/ai/whisper-work/venv/lib/python3.10/site-packages/coremltools/converters/mil/frontend/torch/ops.py", line 36, in <module> from .internal_graph import InternalTorchIRGraph, InternalTorchIRNode File "/Users/danbev/work/ai/whisper-work/venv/lib/python3.10/site-packages/coremltools/converters/mil/frontend/torch/internal_graph.py", line 15, in <module> from .exir_utils import extract_io_from_exir_program File "/Users/danbev/work/ai/whisper-work/venv/lib/python3.10/site-packages/coremltools/converters/mil/frontend/torch/exir_utils.py", line 99, in <module> ) -> Dict[str, torch.fx.Node]: AttributeError: module 'torch' has no attribute 'fx' ``` Using Python3.11 the conversion script runs without any errors.
39 lines
1.3 KiB
Bash
Executable File
39 lines
1.3 KiB
Bash
Executable File
#!/bin/sh
# Generate a Core ML encoder model for whisper.cpp from a Whisper (or
# HuggingFace h5) model, producing models/ggml-<model>-encoder.mlmodelc.

# Abort on the first failing command so a broken conversion step
# (e.g. an incompatible Python/torch environment) stops the script.
set -e
# Usage: ./generate-coreml-model.sh <model-name>

# Validate the command line: either a single Whisper model name, or
# "-h5 <model-name> <model-path>" for a HuggingFace model.
if [ $# -eq 0 ]; then
    echo "No model name supplied"
    echo "Usage for Whisper models: ./generate-coreml-model.sh <model-name>"
    echo "Usage for HuggingFace models: ./generate-coreml-model.sh -h5 <model-name> <model-path>"
    exit 1
# NOTE: use the numeric comparison -ne; "!=" is a string comparison and
# should not be used for arithmetic tests on $#.
elif [ "$1" = "-h5" ] && [ $# -ne 3 ]; then
    echo "No model name and model path supplied for a HuggingFace model"
    echo "Usage for HuggingFace models: ./generate-coreml-model.sh -h5 <model-name> <model-path>"
    exit 1
fi
|
mname="$1"

# Run from the repository root (one level above this script's directory)
# so the models/ paths below resolve correctly.
cd "$(dirname "$0")/../" || exit
|
# Dispatch on the first argument: "-h5" selects the HuggingFace
# conversion path, anything else is treated as a Whisper model name.
case "$mname" in
    -h5)
        mname="$2"
        mpath="$3"
        echo "$mpath"
        python3 models/convert-h5-to-coreml.py --model-name "$mname" --model-path "$mpath" --encoder-only True
        ;;
    *)
        python3 models/convert-whisper-to-coreml.py --model "$mname" --encoder-only True --optimize-ane True
        ;;
esac
|
# Compile the generated Core ML package and install it under models/
# with the name whisper.cpp expects: ggml-<model>-encoder.mlmodelc.
xcrun coremlc compile "models/coreml-encoder-${mname}.mlpackage" models/
rm -rf "models/ggml-${mname}-encoder.mlmodelc"
mv -v "models/coreml-encoder-${mname}.mlmodelc" "models/ggml-${mname}-encoder.mlmodelc"

# TODO: decoder (sometime in the future maybe)
#xcrun coremlc compile models/whisper-decoder-${mname}.mlpackage models/
#rm -rf models/ggml-${mname}-decoder.mlmodelc
#mv -v models/coreml_decoder_${mname}.mlmodelc models/ggml-${mname}-decoder.mlmodelc