mirror of https://github.com/ollama/ollama.git
llm: Check for nil memory data before printing
We dump out our best memory estimate after we complete processing for any reason, including errors. This is helpful for finding what stopped us in error conditions, but in some cases we might not have gotten even the first result yet. Fixes #11957
This commit is contained in:
parent
d0698ae5f2
commit
6e02b81517
|
@ -651,7 +651,9 @@ func (s *ollamaServer) Load(ctx context.Context, gpus discover.GpuInfoList, requ
|
||||||
if !success {
|
if !success {
|
||||||
s.initModel(ctx, LoadRequest{}, LoadOperationClose)
|
s.initModel(ctx, LoadRequest{}, LoadOperationClose)
|
||||||
}
|
}
|
||||||
s.mem.Log(slog.LevelInfo)
|
if s.mem != nil {
|
||||||
|
s.mem.Log(slog.LevelInfo)
|
||||||
|
}
|
||||||
}()
|
}()
|
||||||
|
|
||||||
slog.Info("loading model", "model layers", s.totalLayers, "requested", s.options.NumGPU)
|
slog.Info("loading model", "model layers", s.totalLayers, "requested", s.options.NumGPU)
|
||||||
|
|
Loading…
Reference in New Issue