mirror of https://github.com/ollama/ollama.git

Remove trailing spaces (#3889)

parent f503a848c2
commit 5f73c08729
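The change itself only strips trailing whitespace from the generate script. As a hedged aside (not part of the commit), this kind of cleanup is usually found and applied with standard tools; the llm/generate/*.sh glob below is an assumption about which files are involved:

    # Sketch only, not from the commit: locate and strip trailing blanks.
    grep -nE '[[:blank:]]+$' llm/generate/*.sh      # list offending lines
    sed -i 's/[[:blank:]]*$//' llm/generate/*.sh    # GNU sed; on macOS/BSD use: sed -i '' ...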
@@ -21,7 +21,7 @@ init_vars() {
         # TODO - add additional optimization flags...
         CMAKE_DEFS="-DCMAKE_BUILD_TYPE=Release -DLLAMA_SERVER_VERBOSE=off ${CMAKE_DEFS}"
     fi
-    case $(uname -s) in 
+    case $(uname -s) in
     "Darwin")
         LIB_EXT="dylib"
         WHOLE_ARCHIVE="-Wl,-force_load"
@@ -165,11 +165,11 @@ if [ -d "${CUDA_LIB_DIR}" ]; then
     fi
     if [ "${ARCH}" == "arm64" ]; then
         echo "ARM CPU detected - disabling unsupported AVX instructions"
-        
+
         # ARM-based CPUs such as M1 and Tegra do not support AVX extensions.
         #
-        # CUDA compute < 6.0 lacks proper FP16 support on ARM. 
-        # Disabling has minimal performance effect while maintaining compatibility. 
+        # CUDA compute < 6.0 lacks proper FP16 support on ARM.
+        # Disabling has minimal performance effect while maintaining compatibility.
         ARM64_DEFS="-DLLAMA_AVX=off -DLLAMA_AVX2=off -DLLAMA_AVX512=off -DLLAMA_CUDA_F16=off"
     fi
     # Users building from source can tune the exact flags we pass to cmake for configuring llama.cpp
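For context, ARM64_DEFS holds ordinary cmake cache definitions that the build script later passes to cmake when configuring llama.cpp. A minimal sketch of what that configure step looks like, with an assumed source path and build directory that are not part of this commit:

    # Hedged illustration: how -D defines such as ARM64_DEFS reach cmake.
    # ../llama.cpp and the build/ directory name are assumptions.
    CMAKE_DEFS="-DCMAKE_BUILD_TYPE=Release -DLLAMA_SERVER_VERBOSE=off"
    ARM64_DEFS="-DLLAMA_AVX=off -DLLAMA_AVX2=off -DLLAMA_AVX512=off -DLLAMA_CUDA_F16=off"
    cmake -S ../llama.cpp -B build ${CMAKE_DEFS} ${ARM64_DEFS}
    cmake --build build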