whisper : disable CUDA mel + fix FFMPEG

Georgi Gerganov 2024-06-26 20:11:38 +03:00
parent 3efedb9511
commit dc8cc2dd6f
6 changed files with 68 additions and 41 deletions


@@ -22,10 +22,35 @@ endif()
set(TARGET common)
unset(COMMON_EXTRA_LIBS)
if (WHISPER_FFMPEG)
# As of CMake 3.27, there is no official FindFFmpeg module shipped with CMake.
# Consequently, we added a FindFFmpeg.cmake script to the cmake subfolder:
# whisper.cpp does not need the full ffmpeg libs, just AVFORMAT AVCODEC AVUTIL SWRESAMPLE
# libswresample performs highly optimized audio resampling, rematrixing and sample format conversion operations
# libavcodec provides a generic encoding/decoding framework and contains multiple decoders and encoders for audio, video and subtitle streams, and several bitstream filters.
# libavformat provides a generic framework for multiplexing and demultiplexing (muxing and demuxing) audio, video and subtitle streams.
find_package(FFmpeg REQUIRED)
if (NOT ${FFMPEG_FOUND})
message(FATAL_ERROR "Cannot find ffmpeg libs/headers")
endif()
message(STATUS "Found ffmpeg libs: ${FFMPEG_LIBRARIES}")
message(STATUS "Found ffmpeg headers in: ${FFMPEG_INCLUDE_DIRS}")
message(STATUS "ffmpeg definitions: ${FFMPEG_DEFINITIONS}")
message(STATUS "Found avformat ${AVFORMAT_VERSION}")
include_directories(${FFMPEG_INCLUDE_DIRS})
add_compile_definitions(WHISPER_FFMPEG)
list(APPEND COMMON_EXTRA_LIBS ${FFMPEG_LIBRARIES})
set(COMMON_SOURCES_FFMPEG ffmpeg-transcode.cpp)
endif()
add_library(${TARGET} STATIC
common.h
common.cpp
@@ -38,7 +63,7 @@ add_library(${TARGET} STATIC
include(DefaultTargetOptions)
target_link_libraries(${TARGET} PRIVATE whisper)
target_link_libraries(${TARGET} PRIVATE whisper ${COMMON_EXTRA_LIBS})
set_target_properties(${TARGET} PROPERTIES POSITION_INDEPENDENT_CODE ON)
set_target_properties(${TARGET} PROPERTIES FOLDER "libs")
@@ -56,7 +81,7 @@ if (WHISPER_SDL2)
include(DefaultTargetOptions)
target_include_directories(${TARGET} PUBLIC ${SDL2_INCLUDE_DIRS})
target_link_libraries(${TARGET} PRIVATE ${SDL2_LIBRARIES})
target_link_libraries (${TARGET} PRIVATE ${SDL2_LIBRARIES})
set_target_properties(${TARGET} PROPERTIES POSITION_INDEPENDENT_CODE ON)
set_target_properties(${TARGET} PROPERTIES FOLDER "libs")
@@ -108,7 +133,7 @@ if (WHISPER_SDL2)
set_target_properties(talk-llama PROPERTIES FOLDER "examples")
add_subdirectory(lsp)
set_target_properties(lsp PROPERTIES FOLDER "examples")
if (LLAMA_SYCL)
if (GGML_SYCL)
add_subdirectory(sycl)
set_target_properties(sycl PROPERTIES FOLDER "examples")
endif()

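Note: the WHISPER_FFMPEG compile definition added above gates an FFmpeg-based decode path in the example common code (see the ffmpeg_decode_audio declaration in the next hunk). A minimal sketch of how a caller might use that guard; the load_audio wrapper and the assumption that ffmpeg_decode_audio fills its second argument with decoded audio bytes are illustrations, not code from this commit:

#include <cstdint>
#include <cstdio>
#include <string>
#include <vector>

#ifdef WHISPER_FFMPEG
// Declared by the examples' common code when FFmpeg support is built in;
// the second parameter is assumed to receive the decoded audio bytes.
extern bool ffmpeg_decode_audio(const std::string & ifname, std::vector<uint8_t> & out);
#endif

static bool load_audio(const std::string & fname, std::vector<uint8_t> & out) {
#ifdef WHISPER_FFMPEG
    // libavformat/libavcodec/libswresample handle demuxing, decoding and resampling.
    return ffmpeg_decode_audio(fname, out);
#else
    (void) fname; (void) out;
    fprintf(stderr, "rebuild with WHISPER_FFMPEG enabled to decode non-WAV inputs\n");
    return false;
#endif
}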

@@ -30,7 +30,7 @@ extern bool ffmpeg_decode_audio(const std::string & ifname, std::vector<uint8_t>
#endif
// Function to check if the next argument exists
std::string get_next_arg(int& i, int argc, char** argv, const std::string& flag, gpt_params& params) {
static std::string get_next_arg(int& i, int argc, char** argv, const std::string& flag, gpt_params& params) {
if (i + 1 < argc && argv[i + 1][0] != '-') {
return argv[++i];
} else {
@@ -346,7 +346,7 @@ std::vector<gpt_vocab::id> gpt_tokenize(const gpt_vocab & vocab, const std::stri
return tokens;
}
std::vector<gpt_vocab::id> parse_tokens_from_string(const std::string& input, char delimiter) {
static std::vector<gpt_vocab::id> parse_tokens_from_string(const std::string& input, char delimiter) {
std::vector<gpt_vocab::id> output;
std::stringstream ss(input);
std::string token;
@@ -358,7 +358,7 @@ std::vector<gpt_vocab::id> parse_tokens_from_string(const std::string& input, ch
return output;
}
std::map<std::string, std::vector<gpt_vocab::id>> extract_tests_from_file(const std::string & fpath_test){
static std::map<std::string, std::vector<gpt_vocab::id>> extract_tests_from_file(const std::string & fpath_test){
if (fpath_test.empty()){
fprintf(stderr, "%s : No test file found.\n", __func__);
return std::map<std::string, std::vector<gpt_vocab::id>>();

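Note: the hunks above only change linkage, marking get_next_arg, parse_tokens_from_string and extract_tests_from_file static because they are used solely inside this translation unit. For illustration, a standalone sketch of the same delimiter-splitting approach used by parse_tokens_from_string, with plain int standing in for gpt_vocab::id and a hypothetical helper name:

#include <sstream>
#include <string>
#include <vector>

// Split a delimiter-separated list such as "1,2,3" into token ids.
// 'static' gives the helper internal linkage, mirroring the change above.
static std::vector<int> parse_ids(const std::string & input, char delimiter) {
    std::vector<int> output;
    std::stringstream ss(input);
    std::string token;
    while (std::getline(ss, token, delimiter)) {
        output.push_back(std::stoi(token));
    }
    return output;
}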

@@ -24,7 +24,7 @@ if out=$($CC -dumpmachine); then
build_target=$out
fi
echo "int LLAMA_BUILD_NUMBER = ${build_number};"
echo "char const *LLAMA_COMMIT = \"${build_commit}\";"
echo "char const *LLAMA_COMPILER = \"${build_compiler}\";"
echo "char const *LLAMA_BUILD_TARGET = \"${build_target}\";"
echo "int WHISPER_BUILD_NUMBER = ${build_number};"
echo "char const *WHISPER_COMMIT = \"${build_commit}\";"
echo "char const *WHISPER_COMPILER = \"${build_compiler}\";"
echo "char const *WHISPER_BUILD_TARGET = \"${build_target}\";"

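Note: the script above now emits WHISPER_-prefixed build constants alongside the compiler and target information. A hedged sketch of how the generated definitions might be consumed from C++; the extern declarations and the print helper below are an assumed usage pattern, not part of this commit:

#include <cstdio>

// Declarations matching the definitions emitted by the build-info script.
extern int         WHISPER_BUILD_NUMBER;
extern char const *WHISPER_COMMIT;
extern char const *WHISPER_COMPILER;
extern char const *WHISPER_BUILD_TARGET;

static void print_build_info() {
    printf("whisper build %d (commit %s)\n", WHISPER_BUILD_NUMBER, WHISPER_COMMIT);
    printf("compiler: %s, target: %s\n", WHISPER_COMPILER, WHISPER_BUILD_TARGET);
}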

@@ -77,27 +77,27 @@ if (WHISPER_OPENVINO)
set_target_properties(${TARGET} PROPERTIES FOLDER "libs")
endif()
if (GGML_CUDA)
cmake_minimum_required(VERSION 3.18) # for CMAKE_CUDA_ARCHITECTURES
find_package(CUDAToolkit)
if (CUDAToolkit_FOUND)
message(STATUS "CUDA found")
if (NOT DEFINED CMAKE_CUDA_ARCHITECTURES)
# 52 == lowest CUDA 12 standard
# 60 == f16 CUDA intrinsics
# 61 == integer CUDA intrinsics
# 70 == compute capability at which unrolling a loop in mul_mat_q kernels is faster
set(CMAKE_CUDA_ARCHITECTURES "52;61;70") # lowest CUDA 12 standard + lowest for integer intrinsics
endif()
message(STATUS "Using CUDA architectures: ${CMAKE_CUDA_ARCHITECTURES}")
enable_language(CUDA)
else()
message(WARNING "CUDA not found")
endif()
endif()
#if (GGML_CUDA)
# cmake_minimum_required(VERSION 3.18) # for CMAKE_CUDA_ARCHITECTURES
#
# find_package(CUDAToolkit)
# if (CUDAToolkit_FOUND)
# message(STATUS "CUDA found")
#
# if (NOT DEFINED CMAKE_CUDA_ARCHITECTURES)
# # 52 == lowest CUDA 12 standard
# # 60 == f16 CUDA intrinsics
# # 61 == integer CUDA intrinsics
# # 70 == compute capability at which unrolling a loop in mul_mat_q kernels is faster
# set(CMAKE_CUDA_ARCHITECTURES "52;61;70") # lowest CUDA 12 standard + lowest for integer intrinsics
# endif()
# message(STATUS "Using CUDA architectures: ${CMAKE_CUDA_ARCHITECTURES}")
#
# enable_language(CUDA)
# else()
# message(WARNING "CUDA not found")
# endif()
#endif()
# whisper
@@ -107,11 +107,12 @@ add_library(whisper
whisper-mel.hpp
)
if (GGML_CUDA)
target_sources(whisper PRIVATE whisper-mel-cuda.cu)
target_link_libraries(whisper PRIVATE CUDA::cufft)
endif()
# TODO: disabled because it relies on ggml internals that are no longer accessible (ggml-backend-impl.h, ggml-cuda/common.cuh, ..)
#if (GGML_CUDA)
# target_sources(whisper PRIVATE whisper-mel-cuda.cu)
#
# target_link_libraries(whisper PRIVATE CUDA::cufft)
#endif()
# Set the version numbers
set_target_properties(whisper PROPERTIES

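Note: the architecture list in the disabled block above (52/61/70) refers to CUDA compute capabilities. Purely to illustrate what those numbers correspond to, a hedged host-side sketch that queries the capability of device 0 via the CUDA runtime API (not part of this commit):

#include <cstdio>
#include <cuda_runtime.h>

// Report the compute capability of device 0, e.g. 7.0 maps to architecture "70".
static void print_compute_capability() {
    cudaDeviceProp prop;
    if (cudaGetDeviceProperties(&prop, 0) != cudaSuccess) {
        fprintf(stderr, "no CUDA device available\n");
        return;
    }
    printf("compute capability %d.%d -> CMAKE_CUDA_ARCHITECTURES \"%d%d\"\n",
           prop.major, prop.minor, prop.major, prop.minor);
}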

@@ -2,8 +2,7 @@
#include "whisper-mel-cuda.hpp"
#include "whisper.h"
#include <ggml-cuda/common.cuh>
#include <ggml-backend-impl.h>
#include <ggml-backend.h>
#include <cuda.h>
#include <cuda_runtime.h>


@@ -3215,7 +3215,9 @@ struct mel_calc_cpu : public whisper_mel_calc {
}
static whisper_mel_calc * whisper_mel_calc_create(ggml_backend_t backend, const whisper_filters & filters) {
#if defined(GGML_USE_CUDA) && !defined(GGML_USE_HIPBLAS)
// TODO: disabled because it relies on ggml internals that are no longer accessible (ggml-backend-impl.h, ggml-cuda/common.cuh, ..)
//#if defined(GGML_USE_CUDA) && !defined(GGML_USE_HIPBLAS)
#if 0
if (ggml_backend_is_cuda(backend)) {
auto ret = whisper_mel_calc_create_cuda(backend, filters);
if (ret) {
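Note: with the CUDA branch compiled out via #if 0, whisper_mel_calc_create always falls through to the CPU mel implementation. A simplified sketch of that try-GPU-then-fall-back dispatch, using placeholder types instead of the real whisper_mel_calc interface (assumptions, not code from the repository):

// Placeholder stand-ins for the real whisper_mel_calc types.
struct mel_calc { virtual ~mel_calc() = default; };
struct mel_calc_cpu : mel_calc {};

// Hypothetical GPU factory; returns nullptr when it cannot initialize.
mel_calc * mel_calc_create_cuda();

static mel_calc * mel_calc_create(bool backend_is_cuda) {
#if 0 // CUDA mel path disabled, mirroring the change above
    if (backend_is_cuda) {
        if (mel_calc * ret = mel_calc_create_cuda()) {
            return ret; // use the GPU implementation when it comes up
        }
    }
#endif
    (void) backend_is_cuda;
    return new mel_calc_cpu(); // currently always the CPU fallback
}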