2024-10-17 08:45:00 +08:00
|
|
|
package discover
|
2023-11-30 03:00:37 +08:00
|
|
|
|
2024-05-08 05:54:26 +08:00
|
|
|
import (
|
2025-10-02 06:12:32 +08:00
|
|
|
"context"
|
2024-05-08 05:54:26 +08:00
|
|
|
"log/slog"
|
2025-10-02 06:12:32 +08:00
|
|
|
"path/filepath"
|
|
|
|
"runtime"
|
|
|
|
"strings"
|
2024-05-08 05:54:26 +08:00
|
|
|
|
|
|
|
"github.com/ollama/ollama/format"
|
2025-10-02 06:12:32 +08:00
|
|
|
"github.com/ollama/ollama/ml"
|
2024-05-08 05:54:26 +08:00
|
|
|
)
|
|
|
|
|
2023-12-23 07:43:31 +08:00
|
|
|
// memInfo describes the memory capacity and availability of a device
// (GPU or system), in bytes.
type memInfo struct {
	// TotalMemory is the total physical memory in bytes.
	TotalMemory uint64 `json:"total_memory,omitempty"`
	// FreeMemory is the memory currently available for use in bytes.
	FreeMemory uint64 `json:"free_memory,omitempty"`
	// FreeSwap is the available swap space in bytes.
	FreeSwap uint64 `json:"free_swap,omitempty"` // TODO split this out for system only
}
|
|
|
|
|
|
|
|
// Beginning of an `ollama info` command
//
// GpuInfo describes a single discovered inference device: its identity,
// memory, library/variant selection, and driver details.
type GpuInfo struct { // TODO better name maybe "InferenceProcessor"?
	// DeviceID identifies the device within its backend library.
	ml.DeviceID
	// memInfo carries the device's total/free memory figures.
	memInfo

	// Optional variant to select (e.g. versions, cpu feature flags)
	Variant string `json:"variant"`

	// MinimumMemory represents the minimum memory required to use the GPU
	MinimumMemory uint64 `json:"-"`

	// Any extra PATH/LD_LIBRARY_PATH dependencies required for the Library to operate properly
	DependencyPath []string `json:"lib_path,omitempty"`

	// Set to true if we can NOT reliably discover FreeMemory. A value of true indicates
	// the FreeMemory is best effort, and may over or under report actual memory usage
	// False indicates FreeMemory can generally be trusted on this GPU
	UnreliableFreeMemory bool

	// GPU information
	filterID string // AMD Workaround: The numeric ID of the device used to filter out other devices
	Name     string `json:"name"`    // user friendly name if available
	Compute  string `json:"compute"` // Compute Capability or gfx

	// Driver Information - TODO no need to put this on each GPU
	DriverMajor int `json:"driver_major,omitempty"`
	DriverMinor int `json:"driver_minor,omitempty"`

	// TODO other performance capability info to help in scheduling decisions
}
|
2024-02-12 06:50:06 +08:00
|
|
|
|
build: Make target improvements (#7499)
* llama: wire up builtin runner
This adds a new entrypoint into the ollama CLI to run the cgo built runner.
On Mac arm64, this will have GPU support, but on all other platforms it will
be the lowest common denominator CPU build. After we fully transition
to the new Go runners more tech-debt can be removed and we can stop building
the "default" runner via make and rely on the builtin always.
* build: Make target improvements
Add a few new targets and help for building locally.
This also adjusts the runner lookup to favor local builds, then
runners relative to the executable, and finally payloads.
* Support customized CPU flags for runners
This implements a simplified custom CPU flags pattern for the runners.
When built without overrides, the runner name contains the vector flag
we check for (AVX) to ensure we don't try to run on unsupported systems
and crash. If the user builds a customized set, we omit the naming
scheme and don't check for compatibility. This avoids checking
requirements at runtime, so that logic has been removed as well. This
can be used to build GPU runners with no vector flags, or CPU/GPU
runners with additional flags (e.g. AVX512) enabled.
* Use relative paths
If the user checks out the repo in a path that contains spaces, make gets
really confused so use relative paths for everything in-repo to avoid breakage.
* Remove payloads from main binary
* install: clean up prior libraries
This removes support for v0.3.6 and older versions (before the tar bundle)
and ensures we clean up prior libraries before extracting the bundle(s).
Without this change, runners and dependent libraries could leak when we
update and lead to subtle runtime errors.
2024-12-11 01:47:19 +08:00
|
|
|
func (gpu GpuInfo) RunnerName() string {
|
|
|
|
if gpu.Variant != "" {
|
|
|
|
return gpu.Library + "_" + gpu.Variant
|
|
|
|
}
|
|
|
|
return gpu.Library
|
|
|
|
}
|
|
|
|
|
2024-05-16 06:13:16 +08:00
|
|
|
// CPUInfo describes the host CPU treated as an inference device.
type CPUInfo struct {
	// GpuInfo carries the common device fields (memory, library, etc.).
	GpuInfo
	// CPUs lists one entry per CPU package (socket).
	CPUs []CPU
}
|
|
|
|
|
|
|
|
// CPU type represents a CPU Package occupying a socket
type CPU struct {
	// ID is the processor identifier as reported by the system.
	ID string `cpuinfo:"processor"`
	// VendorID is the CPU vendor string (e.g. from /proc/cpuinfo "vendor_id").
	VendorID string `cpuinfo:"vendor_id"`
	// ModelName is the human-readable CPU model string.
	ModelName string `cpuinfo:"model name"`
	// CoreCount is the total number of physical cores in this package.
	CoreCount int
	// EfficiencyCoreCount is the number of efficiency cores;
	// performance cores = CoreCount - EfficiencyCoreCount.
	EfficiencyCoreCount int // Performance = CoreCount - Efficiency
	// ThreadCount is the total number of hardware threads in this package.
	ThreadCount int
}
|
|
|
|
|
2024-03-31 00:50:05 +08:00
|
|
|
// GpuInfoList is a collection of discovered inference devices.
type GpuInfoList []GpuInfo
|
|
|
|
|
|
|
|
func (l GpuInfoList) ByLibrary() []GpuInfoList {
|
|
|
|
resp := []GpuInfoList{}
|
|
|
|
libs := []string{}
|
|
|
|
for _, info := range l {
|
|
|
|
found := false
|
|
|
|
requested := info.Library
|
2025-01-30 07:03:38 +08:00
|
|
|
if info.Variant != "" {
|
2024-05-31 12:54:07 +08:00
|
|
|
requested += "_" + info.Variant
|
2024-03-31 00:50:05 +08:00
|
|
|
}
|
|
|
|
for i, lib := range libs {
|
|
|
|
if lib == requested {
|
|
|
|
resp[i] = append(resp[i], info)
|
|
|
|
found = true
|
|
|
|
break
|
|
|
|
}
|
|
|
|
}
|
|
|
|
if !found {
|
2024-08-24 06:11:56 +08:00
|
|
|
libs = append(libs, requested)
|
2024-03-31 00:50:05 +08:00
|
|
|
resp = append(resp, []GpuInfo{info})
|
|
|
|
}
|
|
|
|
}
|
|
|
|
return resp
|
2024-02-12 06:50:06 +08:00
|
|
|
}
|
2024-03-31 00:50:05 +08:00
|
|
|
|
2025-10-02 06:12:32 +08:00
|
|
|
func LogDetails(devices []ml.DeviceInfo) {
|
|
|
|
for _, dev := range devices {
|
|
|
|
var libs []string
|
|
|
|
for _, dir := range dev.LibraryPath {
|
|
|
|
if strings.Contains(dir, filepath.Join("lib", "ollama")) {
|
|
|
|
libs = append(libs, filepath.Base(dir))
|
|
|
|
}
|
|
|
|
}
|
|
|
|
typeStr := "discrete"
|
|
|
|
if dev.Integrated {
|
|
|
|
typeStr = "iGPU"
|
|
|
|
}
|
2024-05-08 05:54:26 +08:00
|
|
|
slog.Info("inference compute",
|
2025-10-02 06:12:32 +08:00
|
|
|
"id", dev.ID,
|
|
|
|
"library", dev.Library,
|
|
|
|
"compute", dev.Compute(),
|
|
|
|
"name", dev.Name,
|
|
|
|
"description", dev.Description,
|
|
|
|
"libdirs", strings.Join(libs, ","),
|
|
|
|
"driver", dev.Driver(),
|
|
|
|
"pci_id", dev.PCIID,
|
|
|
|
"type", typeStr,
|
|
|
|
"total", format.HumanBytes2(dev.TotalMemory),
|
|
|
|
"available", format.HumanBytes2(dev.FreeMemory),
|
|
|
|
)
|
|
|
|
}
|
|
|
|
// CPU inference
|
|
|
|
if len(devices) == 0 {
|
|
|
|
dev, _ := GetCPUMem()
|
|
|
|
slog.Info("inference compute",
|
|
|
|
"id", "cpu",
|
|
|
|
"library", "cpu",
|
|
|
|
"compute", "",
|
|
|
|
"name", "cpu",
|
|
|
|
"description", "cpu",
|
|
|
|
"libdirs", "ollama",
|
|
|
|
"driver", "",
|
|
|
|
"pci_id", "",
|
|
|
|
"type", "",
|
|
|
|
"total", format.HumanBytes2(dev.TotalMemory),
|
|
|
|
"available", format.HumanBytes2(dev.FreeMemory),
|
2024-05-08 05:54:26 +08:00
|
|
|
)
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2024-03-31 00:50:05 +08:00
|
|
|
// Sort by Free Space
|
|
|
|
type ByFreeMemory []GpuInfo
|
|
|
|
|
|
|
|
func (a ByFreeMemory) Len() int { return len(a) }
|
|
|
|
func (a ByFreeMemory) Swap(i, j int) { a[i], a[j] = a[j], a[i] }
|
|
|
|
func (a ByFreeMemory) Less(i, j int) bool { return a[i].FreeMemory < a[j].FreeMemory }
|
2024-05-16 06:13:16 +08:00
|
|
|
|
2024-10-15 07:26:45 +08:00
|
|
|
// SystemInfo aggregates the host CPU description and the discovered GPUs.
type SystemInfo struct {
	// System describes the host CPU.
	System CPUInfo `json:"system"`
	// GPUs lists the discovered GPU devices.
	GPUs []GpuInfo `json:"gpus"`
}
|
2024-10-16 02:36:08 +08:00
|
|
|
|
|
|
|
// Return the optimal number of threads to use for inference
|
|
|
|
func (si SystemInfo) GetOptimalThreadCount() int {
|
|
|
|
if len(si.System.CPUs) == 0 {
|
2025-10-02 06:12:32 +08:00
|
|
|
// Fall back to Go's num CPU
|
|
|
|
return runtime.NumCPU()
|
2024-10-16 02:36:08 +08:00
|
|
|
}
|
2024-10-31 06:05:45 +08:00
|
|
|
|
|
|
|
coreCount := 0
|
|
|
|
for _, c := range si.System.CPUs {
|
|
|
|
coreCount += c.CoreCount - c.EfficiencyCoreCount
|
|
|
|
}
|
|
|
|
|
|
|
|
return coreCount
|
2024-10-16 02:36:08 +08:00
|
|
|
}
|
2024-12-04 07:57:19 +08:00
|
|
|
|
|
|
|
// For each GPU, check if it does NOT support flash attention
|
|
|
|
func (l GpuInfoList) FlashAttentionSupported() bool {
|
|
|
|
for _, gpu := range l {
|
2025-08-12 05:45:45 +08:00
|
|
|
supportsFA := gpu.Library == "cpu" ||
|
2025-10-02 06:12:32 +08:00
|
|
|
gpu.Name == "Metal" || gpu.Library == "Metal" ||
|
|
|
|
(gpu.Library == "CUDA" && gpu.DriverMajor >= 7) ||
|
|
|
|
gpu.Library == "ROCm"
|
2024-12-04 07:57:19 +08:00
|
|
|
|
|
|
|
if !supportsFA {
|
|
|
|
return false
|
|
|
|
}
|
|
|
|
}
|
|
|
|
return true
|
|
|
|
}
|
2025-10-02 06:12:32 +08:00
|
|
|
|
|
|
|
// BaseRunner is the minimal interface implemented by a runner process.
type BaseRunner interface {
	// GetPort returns the localhost port number the runner is running on
	GetPort() int

	// HasExited indicates if the runner is no longer running. This can be used during
	// bootstrap to detect if a given filtered device is incompatible and triggered an assert
	HasExited() bool
}
|
|
|
|
|
|
|
|
// RunnerDiscovery extends BaseRunner with device enumeration.
type RunnerDiscovery interface {
	BaseRunner

	// GetDeviceInfos will perform a query of the underlying device libraries
	// for device identification and free VRAM information
	// During bootstrap scenarios, this routine may take seconds to complete
	GetDeviceInfos(ctx context.Context) []ml.DeviceInfo
}
|
|
|
|
|
|
|
|
// FilteredRunnerDiscovery extends RunnerDiscovery with a fast query for
// the devices currently in use by the runner.
type FilteredRunnerDiscovery interface {
	RunnerDiscovery

	// GetActiveDeviceIDs returns the filtered set of devices actively in
	// use by this runner for running models. If the runner is a bootstrap runner, no devices
	// will be active yet so no device IDs are returned.
	// This routine will not query the underlying device and will return immediately
	GetActiveDeviceIDs() []ml.DeviceID
}
|