llama : allow using mmap without PrefetchVirtualMemory, apply GGML_WIN_VER to llama.cpp sources (llama/14013)

Authored by Diego Devesa on 2025-06-05 02:57:42 -07:00, committed by Georgi Gerganov
parent 6dd91d4f7e
commit 13a03c5d33
2 changed files with 1 addition and 2 deletions

ggml/CMakeLists.txt

@@ -137,7 +137,7 @@ set(GGML_CPU_ARM_ARCH "" CACHE STRING "ggml: CPU architecture for ARM")
 set(GGML_CPU_POWERPC_CPUTYPE "" CACHE STRING "ggml: CPU type for PowerPC")
-if (WIN32)
+if (MINGW)
     set(GGML_WIN_VER "0x602" CACHE STRING "ggml: Windows version")
 endif()
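
The GGML_WIN_VER value set here ends up on the compiler command line as _WIN32_WINNT (see the second file below), and 0x602 is the constant that corresponds to Windows 8. As a minimal C++ sketch, not part of this commit, of how a translation unit can inspect the Windows API level it was built to target:

    // Minimal sketch (not from the ggml sources): report the targeted Windows
    // API level. 0x0602 is _WIN32_WINNT_WIN8, the default GGML_WIN_VER above.
    #include <cstdio>

    int main() {
    #if defined(_WIN32_WINNT)
        std::printf("targeting _WIN32_WINNT=0x%04x\n", _WIN32_WINNT);
    #else
        std::printf("_WIN32_WINNT not set (non-Windows build or SDK default)\n");
    #endif
        return 0;
    }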

ggml/src/CMakeLists.txt

@@ -125,7 +125,6 @@ if (NOT MSVC)
 endif()
 if (MINGW)
-    # Target Windows 8 for PrefetchVirtualMemory
     add_compile_definitions(_WIN32_WINNT=${GGML_WIN_VER})
 endif()
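
The CMake changes above only control which _WIN32_WINNT value the sources are compiled against; the "allow using mmap without PrefetchVirtualMemory" part of the commit title refers to the llama.cpp mmap code, which is not part of this sync (only CMake files change here). A hedged sketch of that pattern, assuming a hypothetical helper named prefetch_mapping rather than the actual llama-mmap.cpp code:

    // Sketch only: prefetch a memory-mapped range when the Windows 8 API is
    // available at compile time, and skip the prefetch otherwise so the
    // mapping itself keeps working on older _WIN32_WINNT targets.
    #include <windows.h>
    #include <cstdio>

    static void prefetch_mapping(void * addr, size_t len) {
    #if defined(_WIN32_WINNT) && _WIN32_WINNT >= 0x602
        // PrefetchVirtualMemory is only available when targeting Windows 8 (0x602) or newer.
        WIN32_MEMORY_RANGE_ENTRY range;
        range.VirtualAddress = addr;
        range.NumberOfBytes  = len;
        if (!PrefetchVirtualMemory(GetCurrentProcess(), 1, &range, 0)) {
            std::fprintf(stderr, "warning: PrefetchVirtualMemory failed (%lu)\n", GetLastError());
        }
    #else
        // Older target (e.g. a MinGW build with GGML_WIN_VER below 0x602):
        // no prefetch, pages are simply faulted in on demand.
        (void) addr;
        (void) len;
    #endif
    }

Which branch gets compiled is decided entirely by the _WIN32_WINNT value derived from GGML_WIN_VER, which is why the upstream change also applies that same definition to the llama.cpp sources.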