mirror of https://github.com/ollama/ollama.git
When running the subprocess as a background service, Windows may throttle it, which can lead to thrashing and a very poor token rate (see the sketch below the file listing).
| Name |
|---|
| ext_server |
| generate |
| llama.cpp@8962422b1c |
| patches |
| filetype.go |
| ggla.go |
| ggml.go |
| ggml_test.go |
| gguf.go |
| llm.go |
| llm_darwin.go |
| llm_linux.go |
| llm_windows.go |
| memory.go |
| memory_test.go |
| server.go |
| status.go |
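
The throttling note above concerns how the runner subprocess is launched on Windows. A minimal sketch of the idea, assuming the standard Win32 `ABOVE_NORMAL_PRIORITY_CLASS` creation flag; the variable name is illustrative and the actual flags set in `llm_windows.go` may differ:

```go
//go:build windows

package llm

import "syscall"

// ABOVE_NORMAL_PRIORITY_CLASS is the Win32 process-creation flag that asks the
// scheduler to favor the process over normal-priority background work
// (value per the Windows API documentation).
const ABOVE_NORMAL_PRIORITY_CLASS = 0x00008000

// llamaServerSysProcAttr is a hypothetical example of the attributes the
// subprocess could be launched with so that a background service is not
// throttled into thrashing; the real llm_windows.go may combine other flags.
var llamaServerSysProcAttr = &syscall.SysProcAttr{
	CreationFlags: ABOVE_NORMAL_PRIORITY_CLASS,
}
```

An `exec.Cmd` would then pick this up by assigning `cmd.SysProcAttr = llamaServerSysProcAttr` before calling `cmd.Start()`.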