From 13a03c5d33120f4fa0a2adad3631a434bbc63b7d Mon Sep 17 00:00:00 2001
From: Diego Devesa
Date: Thu, 5 Jun 2025 02:57:42 -0700
Subject: [PATCH] llama : allow using mmap without PrefetchVirtualMemory,
 apply GGML_WIN_VER to llama.cpp sources (llama/14013)

---
 ggml/CMakeLists.txt     | 2 +-
 ggml/src/CMakeLists.txt | 1 -
 2 files changed, 1 insertion(+), 2 deletions(-)

diff --git a/ggml/CMakeLists.txt b/ggml/CMakeLists.txt
index 3d01184a..e186fdf3 100644
--- a/ggml/CMakeLists.txt
+++ b/ggml/CMakeLists.txt
@@ -137,7 +137,7 @@
 set(GGML_CPU_ARM_ARCH        "" CACHE STRING "ggml: CPU architecture for ARM")
 set(GGML_CPU_POWERPC_CPUTYPE "" CACHE STRING "ggml: CPU type for PowerPC")
 
-if (WIN32)
+if (MINGW)
     set(GGML_WIN_VER "0x602" CACHE STRING "ggml: Windows version")
 endif()
 
diff --git a/ggml/src/CMakeLists.txt b/ggml/src/CMakeLists.txt
index abaca7c0..d91dbc46 100644
--- a/ggml/src/CMakeLists.txt
+++ b/ggml/src/CMakeLists.txt
@@ -125,7 +125,6 @@ if (NOT MSVC)
 endif()
 
 if (MINGW)
-    # Target Windows 8 for PrefetchVirtualMemory
     add_compile_definitions(_WIN32_WINNT=${GGML_WIN_VER})
 endif()
 