mirror of https://github.com/ollama/ollama.git
This change moves back to converting bf16 vision weights to fp16, specifically those whose names start with "v." (such as v.blk.0.attn_k.weight). This fixes a bug where converted vision models fail because they call `im2col`, which does not have a bf16 kernel in ggml.
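A minimal sketch of the idea, not the actual convert package API: the hypothetical helper `targetType` below illustrates choosing the on-disk type for a tensor by checking its name prefix and downcasting bf16 vision-tower tensors to fp16, so that ggml's `im2col` (which lacks a bf16 kernel) can operate on them.

```go
package main

import (
	"fmt"
	"strings"
)

// targetType returns the data type to write for a tensor, assuming the
// source checkpoint stores it as bf16. Vision tower tensors (prefixed "v.",
// e.g. "v.blk.0.attn_k.weight") are downcast to fp16 because ggml's im2col
// operator has no bf16 kernel. Hypothetical helper for illustration only.
func targetType(name, srcType string) string {
	if srcType == "BF16" && strings.HasPrefix(name, "v.") {
		return "F16"
	}
	return srcType
}

func main() {
	fmt.Println(targetType("v.blk.0.attn_k.weight", "BF16")) // F16
	fmt.Println(targetType("blk.0.attn_k.weight", "BF16"))   // BF16
}
```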
Contents of the `convert` package:

- sentencepiece/
- testdata/
- convert.go
- convert_bert.go
- convert_commandr.go
- convert_gemma.go
- convert_gemma2.go
- convert_gemma2_adapter.go
- convert_gemma3.go
- convert_gemma3n.go
- convert_gptoss.go
- convert_llama.go
- convert_llama4.go
- convert_llama_adapter.go
- convert_mistral.go
- convert_mixtral.go
- convert_mllama.go
- convert_phi3.go
- convert_qwen2.go
- convert_qwen25vl.go
- convert_test.go
- reader.go
- reader_safetensors.go
- reader_test.go
- reader_torch.go
- sentencepiece_model.proto
- tensor.go
- tensor_test.go
- tokenizer.go
- tokenizer_spm.go
- tokenizer_test.go